core.c 130 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447
  1. // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2. /* Copyright(c) 2019-2020 Realtek Corporation
  3. */
  4. #include <linux/ip.h>
  5. #include <linux/udp.h>
  6. #include "cam.h"
  7. #include "chan.h"
  8. #include "coex.h"
  9. #include "core.h"
  10. #include "efuse.h"
  11. #include "fw.h"
  12. #include "mac.h"
  13. #include "phy.h"
  14. #include "ps.h"
  15. #include "reg.h"
  16. #include "sar.h"
  17. #include "ser.h"
  18. #include "txrx.h"
  19. #include "util.h"
/* Module parameter: set Y to keep the device out of low power (PS) mode. */
static bool rtw89_disable_ps_mode;
module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode");
/* Helpers to build struct ieee80211_channel initializers per band;
 * _hw_val is the IEEE channel number used as the hardware value.
 */
#define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \
	{ .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
#define RTW89_DEF_CHAN_2G(_freq, _hw_val) \
	RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ)
#define RTW89_DEF_CHAN_5G(_freq, _hw_val) \
	RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ)
#define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \
	RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ)
#define RTW89_DEF_CHAN_6G(_freq, _hw_val) \
	RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ)
/* All 14 2.4 GHz channels (2412-2484 MHz, channels 1-14). */
static struct ieee80211_channel rtw89_channels_2ghz[] = {
	RTW89_DEF_CHAN_2G(2412, 1),
	RTW89_DEF_CHAN_2G(2417, 2),
	RTW89_DEF_CHAN_2G(2422, 3),
	RTW89_DEF_CHAN_2G(2427, 4),
	RTW89_DEF_CHAN_2G(2432, 5),
	RTW89_DEF_CHAN_2G(2437, 6),
	RTW89_DEF_CHAN_2G(2442, 7),
	RTW89_DEF_CHAN_2G(2447, 8),
	RTW89_DEF_CHAN_2G(2452, 9),
	RTW89_DEF_CHAN_2G(2457, 10),
	RTW89_DEF_CHAN_2G(2462, 11),
	RTW89_DEF_CHAN_2G(2467, 12),
	RTW89_DEF_CHAN_2G(2472, 13),
	RTW89_DEF_CHAN_2G(2484, 14),
};
/* 5 GHz channels 36-177 (5180-5885 MHz).  Channel 165 is marked
 * IEEE80211_CHAN_NO_HT40MINUS since no valid HT40- pairing exists there.
 */
static struct ieee80211_channel rtw89_channels_5ghz[] = {
	RTW89_DEF_CHAN_5G(5180, 36),
	RTW89_DEF_CHAN_5G(5200, 40),
	RTW89_DEF_CHAN_5G(5220, 44),
	RTW89_DEF_CHAN_5G(5240, 48),
	RTW89_DEF_CHAN_5G(5260, 52),
	RTW89_DEF_CHAN_5G(5280, 56),
	RTW89_DEF_CHAN_5G(5300, 60),
	RTW89_DEF_CHAN_5G(5320, 64),
	RTW89_DEF_CHAN_5G(5500, 100),
	RTW89_DEF_CHAN_5G(5520, 104),
	RTW89_DEF_CHAN_5G(5540, 108),
	RTW89_DEF_CHAN_5G(5560, 112),
	RTW89_DEF_CHAN_5G(5580, 116),
	RTW89_DEF_CHAN_5G(5600, 120),
	RTW89_DEF_CHAN_5G(5620, 124),
	RTW89_DEF_CHAN_5G(5640, 128),
	RTW89_DEF_CHAN_5G(5660, 132),
	RTW89_DEF_CHAN_5G(5680, 136),
	RTW89_DEF_CHAN_5G(5700, 140),
	RTW89_DEF_CHAN_5G(5720, 144),
	RTW89_DEF_CHAN_5G(5745, 149),
	RTW89_DEF_CHAN_5G(5765, 153),
	RTW89_DEF_CHAN_5G(5785, 157),
	RTW89_DEF_CHAN_5G(5805, 161),
	RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165),
	RTW89_DEF_CHAN_5G(5845, 169),
	RTW89_DEF_CHAN_5G(5865, 173),
	RTW89_DEF_CHAN_5G(5885, 177),
};
/* 6 GHz channels 1-233 in steps of 4 (5955-7115 MHz, 20 MHz spacing). */
static struct ieee80211_channel rtw89_channels_6ghz[] = {
	RTW89_DEF_CHAN_6G(5955, 1),
	RTW89_DEF_CHAN_6G(5975, 5),
	RTW89_DEF_CHAN_6G(5995, 9),
	RTW89_DEF_CHAN_6G(6015, 13),
	RTW89_DEF_CHAN_6G(6035, 17),
	RTW89_DEF_CHAN_6G(6055, 21),
	RTW89_DEF_CHAN_6G(6075, 25),
	RTW89_DEF_CHAN_6G(6095, 29),
	RTW89_DEF_CHAN_6G(6115, 33),
	RTW89_DEF_CHAN_6G(6135, 37),
	RTW89_DEF_CHAN_6G(6155, 41),
	RTW89_DEF_CHAN_6G(6175, 45),
	RTW89_DEF_CHAN_6G(6195, 49),
	RTW89_DEF_CHAN_6G(6215, 53),
	RTW89_DEF_CHAN_6G(6235, 57),
	RTW89_DEF_CHAN_6G(6255, 61),
	RTW89_DEF_CHAN_6G(6275, 65),
	RTW89_DEF_CHAN_6G(6295, 69),
	RTW89_DEF_CHAN_6G(6315, 73),
	RTW89_DEF_CHAN_6G(6335, 77),
	RTW89_DEF_CHAN_6G(6355, 81),
	RTW89_DEF_CHAN_6G(6375, 85),
	RTW89_DEF_CHAN_6G(6395, 89),
	RTW89_DEF_CHAN_6G(6415, 93),
	RTW89_DEF_CHAN_6G(6435, 97),
	RTW89_DEF_CHAN_6G(6455, 101),
	RTW89_DEF_CHAN_6G(6475, 105),
	RTW89_DEF_CHAN_6G(6495, 109),
	RTW89_DEF_CHAN_6G(6515, 113),
	RTW89_DEF_CHAN_6G(6535, 117),
	RTW89_DEF_CHAN_6G(6555, 121),
	RTW89_DEF_CHAN_6G(6575, 125),
	RTW89_DEF_CHAN_6G(6595, 129),
	RTW89_DEF_CHAN_6G(6615, 133),
	RTW89_DEF_CHAN_6G(6635, 137),
	RTW89_DEF_CHAN_6G(6655, 141),
	RTW89_DEF_CHAN_6G(6675, 145),
	RTW89_DEF_CHAN_6G(6695, 149),
	RTW89_DEF_CHAN_6G(6715, 153),
	RTW89_DEF_CHAN_6G(6735, 157),
	RTW89_DEF_CHAN_6G(6755, 161),
	RTW89_DEF_CHAN_6G(6775, 165),
	RTW89_DEF_CHAN_6G(6795, 169),
	RTW89_DEF_CHAN_6G(6815, 173),
	RTW89_DEF_CHAN_6G(6835, 177),
	RTW89_DEF_CHAN_6G(6855, 181),
	RTW89_DEF_CHAN_6G(6875, 185),
	RTW89_DEF_CHAN_6G(6895, 189),
	RTW89_DEF_CHAN_6G(6915, 193),
	RTW89_DEF_CHAN_6G(6935, 197),
	RTW89_DEF_CHAN_6G(6955, 201),
	RTW89_DEF_CHAN_6G(6975, 205),
	RTW89_DEF_CHAN_6G(6995, 209),
	RTW89_DEF_CHAN_6G(7015, 213),
	RTW89_DEF_CHAN_6G(7035, 217),
	RTW89_DEF_CHAN_6G(7055, 221),
	RTW89_DEF_CHAN_6G(7075, 225),
	RTW89_DEF_CHAN_6G(7095, 229),
	RTW89_DEF_CHAN_6G(7115, 233),
};
/* Legacy rate table in units of 100 kbps: four CCK rates (1/2/5.5/11M)
 * followed by eight OFDM rates (6-54M).  Index order matches the
 * hardware rate values and is relied on by the "+ 4" offsets in the
 * 5/6 GHz band definitions below.
 */
static struct ieee80211_rate rtw89_bitrates[] = {
	{ .bitrate = 10, .hw_value = 0x00, },
	{ .bitrate = 20, .hw_value = 0x01, },
	{ .bitrate = 55, .hw_value = 0x02, },
	{ .bitrate = 110, .hw_value = 0x03, },
	{ .bitrate = 60, .hw_value = 0x04, },
	{ .bitrate = 90, .hw_value = 0x05, },
	{ .bitrate = 120, .hw_value = 0x06, },
	{ .bitrate = 180, .hw_value = 0x07, },
	{ .bitrate = 240, .hw_value = 0x08, },
	{ .bitrate = 360, .hw_value = 0x09, },
	{ .bitrate = 480, .hw_value = 0x0a, },
	{ .bitrate = 540, .hw_value = 0x0b, },
};
/* Single-channel interface limits: one station plus one of
 * P2P-client/P2P-GO/AP.
 */
static const struct ieee80211_iface_limit rtw89_iface_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO) |
			 BIT(NL80211_IFTYPE_AP),
	},
};
/* Multi-channel (MCC) interface limits: one station plus one
 * P2P-client/P2P-GO; AP is not allowed in the MCC combination.
 */
static const struct ieee80211_iface_limit rtw89_iface_limits_mcc[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
};
/* Advertised interface combinations: up to two interfaces on one
 * channel (SCC), or up to two interfaces across two channels (MCC).
 */
static const struct ieee80211_iface_combination rtw89_iface_combs[] = {
	{
		.limits = rtw89_iface_limits,
		.n_limits = ARRAY_SIZE(rtw89_iface_limits),
		.max_interfaces = 2,
		.num_different_channels = 1,
	},
	{
		.limits = rtw89_iface_limits_mcc,
		.n_limits = ARRAY_SIZE(rtw89_iface_limits_mcc),
		.max_interfaces = 2,
		.num_different_channels = 2,
	},
};
  191. bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate)
  192. {
  193. struct ieee80211_rate rate;
  194. if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
  195. rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "invalid rpt rate %d\n", rpt_rate);
  196. return false;
  197. }
  198. rate = rtw89_bitrates[rpt_rate];
  199. *bitrate = rate.bitrate;
  200. return true;
  201. }
/* 2.4 GHz band template: all 14 channels and the full legacy rate set
 * (CCK + OFDM).  ht_cap/vht_cap start zeroed here.
 */
static const struct ieee80211_supported_band rtw89_sband_2ghz = {
	.band = NL80211_BAND_2GHZ,
	.channels = rtw89_channels_2ghz,
	.n_channels = ARRAY_SIZE(rtw89_channels_2ghz),
	.bitrates = rtw89_bitrates,
	.n_bitrates = ARRAY_SIZE(rtw89_bitrates),
	.ht_cap = {0},
	.vht_cap = {0},
};
/* 5 GHz band template: skips the first four table entries, which are
 * the CCK rates not valid on this band.
 */
static const struct ieee80211_supported_band rtw89_sband_5ghz = {
	.band = NL80211_BAND_5GHZ,
	.channels = rtw89_channels_5ghz,
	.n_channels = ARRAY_SIZE(rtw89_channels_5ghz),

	/* 5G has no CCK rates, 1M/2M/5.5M/11M */
	.bitrates = rtw89_bitrates + 4,
	.n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
	.ht_cap = {0},
	.vht_cap = {0},
};
/* 6 GHz band template: OFDM rates only; HT/VHT caps are omitted
 * (implicitly zero) since 6 GHz uses HE capabilities.
 */
static const struct ieee80211_supported_band rtw89_sband_6ghz = {
	.band = NL80211_BAND_6GHZ,
	.channels = rtw89_channels_6ghz,
	.n_channels = ARRAY_SIZE(rtw89_channels_6ghz),

	/* 6G has no CCK rates, 1M/2M/5.5M/11M */
	.bitrates = rtw89_bitrates + 4,
	.n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
};
  229. static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
  230. struct rtw89_traffic_stats *stats,
  231. struct sk_buff *skb, bool tx)
  232. {
  233. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
  234. if (!ieee80211_is_data(hdr->frame_control))
  235. return;
  236. if (is_broadcast_ether_addr(hdr->addr1) ||
  237. is_multicast_ether_addr(hdr->addr1))
  238. return;
  239. if (tx) {
  240. stats->tx_cnt++;
  241. stats->tx_unicast += skb->len;
  242. } else {
  243. stats->rx_cnt++;
  244. stats->rx_unicast += skb->len;
  245. }
  246. }
/* Initialize @chandef to the driver default: the first 2.4 GHz table
 * entry (channel 1) with no HT.
 */
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef)
{
	cfg80211_chandef_create(chandef, &rtw89_channels_2ghz[0],
				NL80211_CHAN_NO_HT);
}
  252. void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
  253. struct rtw89_chan *chan)
  254. {
  255. struct ieee80211_channel *channel = chandef->chan;
  256. enum nl80211_chan_width width = chandef->width;
  257. u32 primary_freq, center_freq;
  258. u8 center_chan;
  259. u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
  260. u32 offset;
  261. u8 band;
  262. center_chan = channel->hw_value;
  263. primary_freq = channel->center_freq;
  264. center_freq = chandef->center_freq1;
  265. switch (width) {
  266. case NL80211_CHAN_WIDTH_20_NOHT:
  267. case NL80211_CHAN_WIDTH_20:
  268. bandwidth = RTW89_CHANNEL_WIDTH_20;
  269. break;
  270. case NL80211_CHAN_WIDTH_40:
  271. bandwidth = RTW89_CHANNEL_WIDTH_40;
  272. if (primary_freq > center_freq) {
  273. center_chan -= 2;
  274. } else {
  275. center_chan += 2;
  276. }
  277. break;
  278. case NL80211_CHAN_WIDTH_80:
  279. case NL80211_CHAN_WIDTH_160:
  280. bandwidth = nl_to_rtw89_bandwidth(width);
  281. if (primary_freq > center_freq) {
  282. offset = (primary_freq - center_freq - 10) / 20;
  283. center_chan -= 2 + offset * 4;
  284. } else {
  285. offset = (center_freq - primary_freq - 10) / 20;
  286. center_chan += 2 + offset * 4;
  287. }
  288. break;
  289. default:
  290. center_chan = 0;
  291. break;
  292. }
  293. switch (channel->band) {
  294. default:
  295. case NL80211_BAND_2GHZ:
  296. band = RTW89_BAND_2G;
  297. break;
  298. case NL80211_BAND_5GHZ:
  299. band = RTW89_BAND_5G;
  300. break;
  301. case NL80211_BAND_6GHZ:
  302. band = RTW89_BAND_6G;
  303. break;
  304. }
  305. rtw89_chan_create(chan, center_chan, channel->hw_value, band, bandwidth);
  306. }
/* Re-apply TX power for the currently active channel.
 *
 * No-op while the channel entity is inactive.  The operating
 * sub-entity is chosen from the current entity mode; an in-progress
 * remain-on-channel (ROC) entity, if any, overrides that choice.
 */
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan;
	enum rtw89_sub_entity_idx sub_entity_idx;
	enum rtw89_sub_entity_idx roc_idx;
	enum rtw89_phy_idx phy_idx;
	enum rtw89_entity_mode mode;
	bool entity_active;

	entity_active = rtw89_get_entity_state(rtwdev);
	if (!entity_active)
		return;

	mode = rtw89_get_entity_mode(rtwdev);
	switch (mode) {
	case RTW89_ENTITY_MODE_SCC:
	case RTW89_ENTITY_MODE_MCC:
		sub_entity_idx = RTW89_SUB_ENTITY_0;
		break;
	case RTW89_ENTITY_MODE_MCC_PREPARE:
		sub_entity_idx = RTW89_SUB_ENTITY_1;
		break;
	default:
		WARN(1, "Invalid ent mode: %d\n", mode);
		return;
	}

	/* ROC takes precedence over the mode-derived sub-entity. */
	roc_idx = atomic_read(&hal->roc_entity_idx);
	if (roc_idx != RTW89_SUB_ENTITY_IDLE)
		sub_entity_idx = roc_idx;

	phy_idx = RTW89_PHY_0;
	chan = rtw89_chan_get(rtwdev, sub_entity_idx);
	chip->ops->set_txpwr(rtwdev, chan, phy_idx);
}
/* Program the hardware to the channel selected by the entity layer.
 *
 * Runs the chip's prepare / set_channel / set_txpwr / done sequence,
 * then, on first activation or a band change, notifies BTC of the new
 * band and triggers band-change RF calibration.  Finally marks the
 * entity state active.
 */
void rtw89_set_channel(struct rtw89_dev *rtwdev)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan_rcd *chan_rcd;
	const struct rtw89_chan *chan;
	enum rtw89_sub_entity_idx sub_entity_idx;
	enum rtw89_sub_entity_idx roc_idx;
	enum rtw89_mac_idx mac_idx;
	enum rtw89_phy_idx phy_idx;
	struct rtw89_channel_help_params bak;
	enum rtw89_entity_mode mode;
	bool entity_active;

	/* Snapshot the state BEFORE recalc so a first-time activation is
	 * detected for the band-change handling below.
	 */
	entity_active = rtw89_get_entity_state(rtwdev);

	mode = rtw89_entity_recalc(rtwdev);
	switch (mode) {
	case RTW89_ENTITY_MODE_SCC:
	case RTW89_ENTITY_MODE_MCC:
		sub_entity_idx = RTW89_SUB_ENTITY_0;
		break;
	case RTW89_ENTITY_MODE_MCC_PREPARE:
		sub_entity_idx = RTW89_SUB_ENTITY_1;
		break;
	default:
		WARN(1, "Invalid ent mode: %d\n", mode);
		return;
	}

	/* ROC takes precedence over the mode-derived sub-entity. */
	roc_idx = atomic_read(&hal->roc_entity_idx);
	if (roc_idx != RTW89_SUB_ENTITY_IDLE)
		sub_entity_idx = roc_idx;

	mac_idx = RTW89_MAC_0;
	phy_idx = RTW89_PHY_0;

	chan = rtw89_chan_get(rtwdev, sub_entity_idx);
	chan_rcd = rtw89_chan_rcd_get(rtwdev, sub_entity_idx);

	rtw89_chip_set_channel_prepare(rtwdev, &bak, chan, mac_idx, phy_idx);

	chip->ops->set_channel(rtwdev, chan, mac_idx, phy_idx);

	chip->ops->set_txpwr(rtwdev, chan, phy_idx);

	rtw89_chip_set_channel_done(rtwdev, &bak, chan, mac_idx, phy_idx);

	if (!entity_active || chan_rcd->band_changed) {
		rtw89_btc_ntfy_switch_band(rtwdev, phy_idx, chan->band_type);
		rtw89_chip_rfk_band_changed(rtwdev, phy_idx);
	}

	rtw89_set_entity_state(rtwdev, true);
}
/* Fill @chan with the rtw89 channel parameters derived from the
 * chandef currently bound to @rtwvif's sub-entity.
 */
void rtw89_get_channel(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		       struct rtw89_chan *chan)
{
	const struct cfg80211_chan_def *chandef;

	chandef = rtw89_chandef_get(rtwdev, rtwvif->sub_entity_idx);
	rtw89_get_channel_params(chandef, chan);
}
  391. static enum rtw89_core_tx_type
  392. rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
  393. struct sk_buff *skb)
  394. {
  395. struct ieee80211_hdr *hdr = (void *)skb->data;
  396. __le16 fc = hdr->frame_control;
  397. if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
  398. return RTW89_CORE_TX_TYPE_MGMT;
  399. return RTW89_CORE_TX_TYPE_DATA;
  400. }
/* Fill the A-MPDU aggregation fields of the TX descriptor.
 *
 * EAPOL frames only get the "bk" descriptor flag and no aggregation
 * setup (NOTE(review): presumably "bk" suppresses aggregation for this
 * frame — confirm against the TX descriptor definition).  Frames
 * without IEEE80211_TX_CTL_AMPDU, or without an associated station,
 * are left untouched.
 */
static void
rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
				struct rtw89_core_tx_request *tx_req,
				enum btc_pkt_type pkt_type)
{
	struct ieee80211_sta *sta = tx_req->sta;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct sk_buff *skb = tx_req->skb;
	struct rtw89_sta *rtwsta;
	u8 ampdu_num;
	u8 tid;

	if (pkt_type == PACKET_EAPOL) {
		desc_info->bk = true;
		return;
	}

	if (!(IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU))
		return;

	if (!sta) {
		rtw89_warn(rtwdev, "cannot set ampdu info without sta\n");
		return;
	}

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	rtwsta = (struct rtw89_sta *)sta->drv_priv;

	/* Use the per-TID agg_num if set, otherwise fall back to the
	 * peer's HT A-MPDU factor (4 << factor frames); the descriptor
	 * field holds the count minus one.  The #if handles the sta ->
	 * sta->deflink field move in kernel 5.19.
	 */
	ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?
			  rtwsta->ampdu_params[tid].agg_num :
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
			  4 << sta->ht_cap.ampdu_factor) - 1);
#else
			  4 << sta->deflink.ht_cap.ampdu_factor) - 1);
#endif

	desc_info->agg_en = true;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	desc_info->ampdu_density = sta->ht_cap.ampdu_density;
#else
	desc_info->ampdu_density = sta->deflink.ht_cap.ampdu_density;
#endif
	desc_info->ampdu_num = ampdu_num;
}
static void
rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
			     struct rtw89_core_tx_request *tx_req)
{
	/* Program HW encryption for this frame: map the mac80211 cipher to
	 * an RTW89 security type, point the descriptor at the security CAM
	 * slot, and (on chips with a HW-built security header) supply the
	 * packet number.
	 */
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct ieee80211_vif *vif = tx_req->vif;
	struct ieee80211_sta *sta = tx_req->sta;
	struct ieee80211_tx_info *info;
	struct ieee80211_key_conf *key;
	struct rtw89_vif *rtwvif;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_addr_cam_entry *addr_cam;
	struct rtw89_sec_cam_entry *sec_cam;
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct sk_buff *skb = tx_req->skb;
	u8 sec_type = RTW89_SEC_KEY_TYPE_NONE;
	u64 pn64;
	if (!vif) {
		rtw89_warn(rtwdev, "cannot set sec key without vif\n");
		return;
	}
	rtwvif = (struct rtw89_vif *)vif->drv_priv;
	addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
	info = IEEE80211_SKB_CB(skb);
	/* Caller (rtw89_core_tx_update_data_info) only invokes us when
	 * info->control.hw_key is non-NULL.
	 */
	key = info->control.hw_key;
	sec_cam = addr_cam->sec_entries[key->hw_key_idx];
	if (!sec_cam) {
		rtw89_warn(rtwdev, "sec cam entry is empty\n");
		return;
	}
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		sec_type = RTW89_SEC_KEY_TYPE_WEP40;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		sec_type = RTW89_SEC_KEY_TYPE_WEP104;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		sec_type = RTW89_SEC_KEY_TYPE_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		sec_type = RTW89_SEC_KEY_TYPE_CCMP128;
		break;
	case WLAN_CIPHER_SUITE_CCMP_256:
		sec_type = RTW89_SEC_KEY_TYPE_CCMP256;
		break;
	case WLAN_CIPHER_SUITE_GCMP:
		sec_type = RTW89_SEC_KEY_TYPE_GCMP128;
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		sec_type = RTW89_SEC_KEY_TYPE_GCMP256;
		break;
	default:
		/* Unsupported cipher: leave the descriptor unencrypted. */
		rtw89_warn(rtwdev, "key cipher not supported %d\n", key->cipher);
		return;
	}
	desc_info->sec_en = true;
	desc_info->sec_keyid = key->keyidx;
	desc_info->sec_type = sec_type;
	desc_info->sec_cam_idx = sec_cam->sec_cam_idx;
	/* Chips without a HW security header stop here; the PN travels
	 * in-frame instead of in the descriptor.
	 */
	if (!chip->hw_sec_hdr)
		return;
	/* Low 48 bits of the per-key TX packet number, one byte per slot,
	 * least-significant byte first.
	 */
	pn64 = atomic64_inc_return(&key->tx_pn);
	desc_info->sec_seq[0] = pn64;
	desc_info->sec_seq[1] = pn64 >> 8;
	desc_info->sec_seq[2] = pn64 >> 16;
	desc_info->sec_seq[3] = pn64 >> 24;
	desc_info->sec_seq[4] = pn64 >> 32;
	desc_info->sec_seq[5] = pn64 >> 40;
	desc_info->wp_offset = 1; /* in unit of 8 bytes for security header */
}
  510. static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
  511. struct rtw89_core_tx_request *tx_req,
  512. const struct rtw89_chan *chan)
  513. {
  514. struct sk_buff *skb = tx_req->skb;
  515. struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
  516. struct ieee80211_vif *vif = tx_info->control.vif;
  517. u16 lowest_rate;
  518. if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE ||
  519. (vif && vif->p2p))
  520. lowest_rate = RTW89_HW_RATE_OFDM6;
  521. else if (chan->band_type == RTW89_BAND_2G)
  522. lowest_rate = RTW89_HW_RATE_CCK1;
  523. else
  524. lowest_rate = RTW89_HW_RATE_OFDM6;
  525. if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta)
  526. return lowest_rate;
  527. return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
  528. }
  529. static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
  530. struct rtw89_core_tx_request *tx_req)
  531. {
  532. struct ieee80211_vif *vif = tx_req->vif;
  533. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  534. struct ieee80211_sta *sta = tx_req->sta;
  535. struct rtw89_sta *rtwsta;
  536. if (!sta)
  537. return rtwvif->mac_id;
  538. rtwsta = (struct rtw89_sta *)sta->drv_priv;
  539. return rtwsta->mac_id;
  540. }
  541. static void
  542. rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
  543. struct rtw89_core_tx_request *tx_req)
  544. {
  545. struct ieee80211_vif *vif = tx_req->vif;
  546. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  547. struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
  548. const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
  549. rtwvif->sub_entity_idx);
  550. u8 qsel, ch_dma;
  551. qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
  552. ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
  553. desc_info->qsel = qsel;
  554. desc_info->ch_dma = ch_dma;
  555. desc_info->port = desc_info->hiq ? rtwvif->port : 0;
  556. desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
  557. desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
  558. desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
  559. /* fixed data rate for mgmt frames */
  560. desc_info->en_wd_info = true;
  561. desc_info->use_rate = true;
  562. desc_info->dis_data_fb = true;
  563. desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req, chan);
  564. rtw89_debug(rtwdev, RTW89_DBG_TXRX,
  565. "tx mgmt frame with rate 0x%x on channel %d (band %d, bw %d)\n",
  566. desc_info->data_rate, chan->channel, chan->band_type,
  567. chan->band_width);
  568. }
  569. static void
  570. rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev,
  571. struct rtw89_core_tx_request *tx_req)
  572. {
  573. struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
  574. desc_info->is_bmc = false;
  575. desc_info->wd_page = false;
  576. desc_info->ch_dma = RTW89_DMA_H2C;
  577. }
static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc,
					   const struct rtw89_chan *chan)
{
	/* Build an HE variant HT-control field carrying an Operating Mode
	 * (OM) control that disables UL MU, for chips that must avoid
	 * 2 GHz / 40 MHz uplink OFDMA. Leaves *htc untouched otherwise.
	 */
	static const u8 rtw89_bandwidth_to_om[] = {
		[RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20,
		[RTW89_CHANNEL_WIDTH_40] = HTC_OM_CHANNEL_WIDTH_40,
		[RTW89_CHANNEL_WIDTH_80] = HTC_OM_CHANNEL_WIDTH_80,
		[RTW89_CHANNEL_WIDTH_160] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80,
		[RTW89_CHANNEL_WIDTH_80_80] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80,
	};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 om_bandwidth;
	/* Only applies when the chip flags 2G/40M UL OFDMA as broken and
	 * we are actually on a 2 GHz 40 MHz channel.
	 */
	if (!chip->dis_2g_40m_ul_ofdma ||
	    chan->band_type != RTW89_BAND_2G ||
	    chan->band_width != RTW89_CHANNEL_WIDTH_40)
		return;
	/* Bounds-checked table lookup (defensive; band_width is 40 here). */
	om_bandwidth = chan->band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
		       rtw89_bandwidth_to_om[chan->band_width] : 0;
	/* OM control advertising current RX/TX NSS and bandwidth, with
	 * UL MU disabled (UL_MU_DIS = 1) and everything else permitted.
	 */
	*htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
	       le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) |
	       le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) |
	       le32_encode_bits(om_bandwidth, RTW89_HTC_MASK_HTC_OM_CH_WIDTH) |
	       le32_encode_bits(1, RTW89_HTC_MASK_HTC_OM_UL_MU_DIS) |
	       le32_encode_bits(hal->tx_nss - 1, RTW89_HTC_MASK_HTC_OM_TX_NSTS) |
	       le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_ER_SU_DIS) |
	       le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR) |
	       le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS);
}
static bool
__rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
				 struct rtw89_core_tx_request *tx_req,
				 enum btc_pkt_type pkt_type)
{
	/* Decide whether an HE variant HT-control field may be inserted
	 * into this frame. Returns true only for plain QoS data to an HE
	 * station with enough headroom and no legacy-fallback risk.
	 */
	struct ieee80211_sta *sta = tx_req->sta;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct sk_buff *skb = tx_req->skb;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	/* AP IOT issue with EAPoL, ARP and DHCP */
	if (pkt_type < PACKET_MAX)
		return false;
	/* Only HE stations understand the HE variant HT-control field. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	if (!sta || !sta->he_cap.has_he)
#else
	if (!sta || !sta->deflink.he_cap.has_he)
#endif
		return false;
	if (!ieee80211_is_data_qos(fc))
		return false;
	/* Need room to push the 4-byte HT-control field in front. */
	if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN)
		return false;
	/* Rate adaptation may still fall back to legacy rates, which
	 * cannot carry +HTC.
	 */
	if (rtwsta && rtwsta->ra_report.might_fallback_legacy)
		return false;
	return true;
}
static void
__rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev,
				  struct rtw89_core_tx_request *tx_req)
{
	/* Insert a 4-byte HE variant HT-control field right after the QoS
	 * control field and set the Order bit to signal its presence.
	 * Caller has already validated headroom and HE capability via
	 * __rtw89_core_tx_check_he_qos_htc().
	 */
	struct ieee80211_sta *sta = tx_req->sta;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct sk_buff *skb = tx_req->skb;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	void *data;
	__le32 *htc;
	u8 *qc;
	int hdr_len;
	/* QoS data header length including QoS control: 26 bytes, or 32
	 * with a fourth address (NOTE(review): assumes no HT-control is
	 * present yet — confirm against check helper).
	 */
	hdr_len = ieee80211_has_a4(fc) ? 32 : 26;
	/* Grow the frame at the front and slide the header down, opening
	 * a 4-byte gap immediately after the QoS control field.
	 */
	data = skb_push(skb, IEEE80211_HT_CTL_LEN);
	memmove(data, data + IEEE80211_HT_CTL_LEN, hdr_len);
	hdr = data;
	htc = data + hdr_len;
	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_ORDER);
	/* Use the cached per-station HTC template when available;
	 * otherwise advertise a plain HE CAS control.
	 */
	*htc = rtwsta->htc_template ? rtwsta->htc_template :
	       le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
	       le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_CAS, RTW89_HTC_MASK_CTL_ID);
	/* Set EOSP in the (relocated) QoS control field. */
	qc = data + hdr_len - IEEE80211_QOS_CTL_LEN;
	qc[0] |= IEEE80211_QOS_CTL_EOSP;
}
  659. static void
  660. rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev,
  661. struct rtw89_core_tx_request *tx_req,
  662. enum btc_pkt_type pkt_type)
  663. {
  664. struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
  665. struct ieee80211_vif *vif = tx_req->vif;
  666. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  667. if (!__rtw89_core_tx_check_he_qos_htc(rtwdev, tx_req, pkt_type))
  668. goto desc_bk;
  669. __rtw89_core_tx_adjust_he_qos_htc(rtwdev, tx_req);
  670. desc_info->pkt_size += IEEE80211_HT_CTL_LEN;
  671. desc_info->a_ctrl_bsr = true;
  672. desc_bk:
  673. if (!rtwvif || rtwvif->last_a_ctrl == desc_info->a_ctrl_bsr)
  674. return;
  675. rtwvif->last_a_ctrl = desc_info->a_ctrl_bsr;
  676. desc_info->bk = true;
  677. }
static u16 rtw89_core_get_data_rate(struct rtw89_dev *rtwdev,
				    struct rtw89_core_tx_request *tx_req)
{
	/* Pick the lowest usable data rate for this frame: a user-forced
	 * rate pattern wins; otherwise the band's lowest mandatory rate,
	 * raised by the station's supported-rate bitmap when known.
	 */
	struct ieee80211_vif *vif = tx_req->vif;
	struct ieee80211_sta *sta = tx_req->sta;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
	enum rtw89_sub_entity_idx idx = rtwvif->sub_entity_idx;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, idx);
	u16 lowest_rate;
	if (rate_pattern->enable)
		return rate_pattern->rate;
	/* P2P forbids CCK; otherwise 2 GHz starts at CCK 1M, else OFDM 6M. */
	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	if (!sta || !sta->supp_rates[chan->band_type])
#else
	if (!sta || !sta->deflink.supp_rates[chan->band_type])
#endif
		return lowest_rate;
	/* Lowest set bit of the station's rate bitmap, as an offset above
	 * the band's base rate.
	 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0)
	return __ffs(sta->supp_rates[chan->band_type]) + lowest_rate;
#else
	return __ffs(sta->deflink.supp_rates[chan->band_type]) + lowest_rate;
#endif
}
  708. static void
  709. rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
  710. struct rtw89_core_tx_request *tx_req)
  711. {
  712. struct ieee80211_vif *vif = tx_req->vif;
  713. struct ieee80211_sta *sta = tx_req->sta;
  714. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  715. struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
  716. struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
  717. struct sk_buff *skb = tx_req->skb;
  718. u8 tid, tid_indicate;
  719. u8 qsel, ch_dma;
  720. tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
  721. tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
  722. qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
  723. ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
  724. desc_info->ch_dma = ch_dma;
  725. desc_info->tid_indicate = tid_indicate;
  726. desc_info->qsel = qsel;
  727. desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
  728. desc_info->port = desc_info->hiq ? rtwvif->port : 0;
  729. desc_info->er_cap = rtwsta ? rtwsta->er_cap : false;
  730. /* enable wd_info for AMPDU */
  731. desc_info->en_wd_info = true;
  732. if (IEEE80211_SKB_CB(skb)->control.hw_key)
  733. rtw89_core_tx_update_sec_key(rtwdev, tx_req);
  734. desc_info->data_retry_lowest_rate = rtw89_core_get_data_rate(rtwdev, tx_req);
  735. }
static enum btc_pkt_type
rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
				  struct rtw89_core_tx_request *tx_req)
{
	/* Sniff outgoing frames for BT-coexistence-relevant traffic
	 * (EAPOL, ARP, DHCP, ICMP), queue the matching notify work, and
	 * report the detected packet class (PACKET_MAX if none matched).
	 */
	struct sk_buff *skb = tx_req->skb;
	struct udphdr *udphdr;
	/* mac80211 marks 802.1X/EAPOL frames with PORT_CTRL_PROTO. */
	if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
		ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.eapol_notify_work);
		return PACKET_EAPOL;
	}
	if (skb->protocol == htons(ETH_P_ARP)) {
		ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.arp_notify_work);
		return PACKET_ARP;
	}
	/* DHCP: UDP between ports 67 (server) and 68 (client), in either
	 * direction. The length floor of 282 presumably filters frames too
	 * short to hold a BOOTP payload — TODO confirm the exact bound.
	 */
	if (skb->protocol == htons(ETH_P_IP) &&
	    ip_hdr(skb)->protocol == IPPROTO_UDP) {
		udphdr = udp_hdr(skb);
		if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) ||
		     (udphdr->source == htons(68) && udphdr->dest == htons(67))) &&
		    skb->len > 282) {
			ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.dhcp_notify_work);
			return PACKET_DHCP;
		}
	}
	if (skb->protocol == htons(ETH_P_IP) &&
	    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
		ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.icmp_notify_work);
		return PACKET_ICMP;
	}
	return PACKET_MAX;
}
  767. static void rtw89_core_tx_update_llc_hdr(struct rtw89_dev *rtwdev,
  768. struct rtw89_tx_desc_info *desc_info,
  769. struct sk_buff *skb)
  770. {
  771. struct ieee80211_hdr *hdr = (void *)skb->data;
  772. __le16 fc = hdr->frame_control;
  773. desc_info->hdr_llc_len = ieee80211_hdrlen(fc);
  774. desc_info->hdr_llc_len >>= 1; /* in unit of 2 bytes */
  775. }
  776. static void
  777. rtw89_core_tx_wake(struct rtw89_dev *rtwdev,
  778. struct rtw89_core_tx_request *tx_req)
  779. {
  780. const struct rtw89_chip_info *chip = rtwdev->chip;
  781. if (!RTW89_CHK_FW_FEATURE(TX_WAKE, &rtwdev->fw))
  782. return;
  783. if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
  784. return;
  785. if (chip->chip_id != RTL8852C &&
  786. tx_req->tx_type != RTW89_CORE_TX_TYPE_MGMT)
  787. return;
  788. rtw89_mac_notify_wake(rtwdev);
  789. }
static void
rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
			       struct rtw89_core_tx_request *tx_req)
{
	/* Central TX descriptor builder: fill the common fields, classify
	 * the frame, then dispatch to the MGMT/DATA/FWCMD-specific setup.
	 */
	struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
	struct sk_buff *skb = tx_req->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	enum rtw89_core_tx_type tx_type;
	enum btc_pkt_type pkt_type;
	bool is_bmc;
	u16 seq;
	/* Sequence number from the 802.11 sequence-control field. */
	seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
	/* FWCMD is set explicitly by rtw89_h2c_tx(); everything else is
	 * classified from the frame header.
	 */
	if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) {
		tx_type = rtw89_core_get_tx_type(rtwdev, skb);
		tx_req->tx_type = tx_type;
	}
	is_bmc = (is_broadcast_ether_addr(hdr->addr1) ||
		  is_multicast_ether_addr(hdr->addr1));
	desc_info->seq = seq;
	desc_info->pkt_size = skb->len;
	desc_info->is_bmc = is_bmc;
	desc_info->wd_page = true;
	desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM;
	switch (tx_req->tx_type) {
	case RTW89_CORE_TX_TYPE_MGMT:
		rtw89_core_tx_update_mgmt_info(rtwdev, tx_req);
		break;
	case RTW89_CORE_TX_TYPE_DATA:
		rtw89_core_tx_update_data_info(rtwdev, tx_req);
		/* BTC sniffing must run before HTC/AMPDU setup: both
		 * depend on the detected packet type.
		 */
		pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req);
		rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type);
		rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, pkt_type);
		rtw89_core_tx_update_llc_hdr(rtwdev, desc_info, skb);
		break;
	case RTW89_CORE_TX_TYPE_FWCMD:
		rtw89_core_tx_update_h2c_info(rtwdev, tx_req);
		break;
	}
}
  830. void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
  831. {
  832. u8 ch_dma;
  833. ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
  834. rtw89_hci_tx_kick_off(rtwdev, ch_dma);
  835. }
int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				    int qsel, unsigned int timeout)
{
	/* Kick off TX for @skb and block until its completion is reported
	 * or @timeout (ms) expires.
	 *
	 * Returns 0 on confirmed TX, -ETIMEDOUT on timeout, -EAGAIN when
	 * the completion fired but HW reported the TX as not done. If the
	 * wait object cannot be allocated, degrade to fire-and-forget and
	 * return 0.
	 */
	struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
	struct rtw89_tx_wait_info *wait;
	unsigned long time_left;
	int ret = 0;
	wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	if (!wait) {
		rtw89_core_tx_kick_off(rtwdev, qsel);
		return 0;
	}
	init_completion(&wait->completion);
	/* Publish the wait object via RCU so the completion path can
	 * safely observe it (or its absence) concurrently.
	 */
	rcu_assign_pointer(skb_data->wait, wait);
	rtw89_core_tx_kick_off(rtwdev, qsel);
	time_left = wait_for_completion_timeout(&wait->completion,
						msecs_to_jiffies(timeout));
	if (time_left == 0)
		ret = -ETIMEDOUT;
	else if (!wait->tx_done)
		ret = -EAGAIN;
	/* Unpublish before freeing; kfree_rcu defers the free past any
	 * in-flight RCU readers on the completion path.
	 */
	rcu_assign_pointer(skb_data->wait, NULL);
	kfree_rcu(wait, rcu_head);
	return ret;
}
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
		 struct sk_buff *skb, bool fwdl)
{
	/* Send a host-to-chip (H2C) command or firmware-download chunk
	 * over the fwcmd DMA channel.
	 *
	 * Returns 0 on success (including the power-off case, where the
	 * skb is dropped), -ENOSPC when no fwcmd TX resource is free, or
	 * the HCI write error. NOTE(review): on the error paths the skb
	 * is not freed here — presumably callers own it then; verify.
	 */
	struct rtw89_core_tx_request tx_req = {0};
	u32 cnt;
	int ret;
	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "ignore h2c due to power is off with firmware state=%d\n",
			    test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags));
		dev_kfree_skb(skb);
		return 0;
	}
	tx_req.skb = skb;
	tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD;
	if (fwdl)
		tx_req.desc_info.fw_dl = true;
	rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
	/* Firmware-download payloads are too bulky to dump. */
	if (!fwdl)
		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "H2C: ", skb->data, skb->len);
	cnt = rtw89_hci_check_and_reclaim_tx_resource(rtwdev, RTW89_TXCH_CH12);
	if (cnt == 0) {
		rtw89_err(rtwdev, "no tx fwcmd resource\n");
		return -ENOSPC;
	}
	ret = rtw89_hci_tx_write(rtwdev, &tx_req);
	if (ret) {
		rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
		return ret;
	}
	rtw89_hci_tx_kick_off(rtwdev, RTW89_TXCH_CH12);
	return 0;
}
  894. int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
  895. struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
  896. {
  897. struct rtw89_core_tx_request tx_req = {0};
  898. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  899. int ret;
  900. tx_req.skb = skb;
  901. tx_req.sta = sta;
  902. tx_req.vif = vif;
  903. rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
  904. rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
  905. rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
  906. rtw89_core_tx_wake(rtwdev, &tx_req);
  907. ret = rtw89_hci_tx_write(rtwdev, &tx_req);
  908. if (ret) {
  909. rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
  910. return ret;
  911. }
  912. if (qsel)
  913. *qsel = tx_req.desc_info.qsel;
  914. return 0;
  915. }
/* TXWD body dword0 (AX): WP offset, WD-info enable, DMA channel, header+LLC
 * length, WD page, FW-download flag and HW sequence-number configuration.
 */
static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) |
		    FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) |
		    FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
		    FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
		    FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
		    FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) |
		    FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) |
		    FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode);
	return cpu_to_le32(dword);
}
/* TXWD body dword0 (AX v1): like body0 but with the v1 WP-offset field and
 * without the HW SSN fields.
 */
static __le32 rtw89_build_txwd_body0_v1(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) |
		    FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) |
		    FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
		    FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
		    FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
		    FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
	return cpu_to_le32(dword);
}
/* TXWD body dword1 (AX v1): address-info count and security key id/type. */
static __le32 rtw89_build_txwd_body1_v1(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) |
		    FIELD_PREP(RTW89_TXWD_BODY1_SEC_KEYID, desc_info->sec_keyid) |
		    FIELD_PREP(RTW89_TXWD_BODY1_SEC_TYPE, desc_info->sec_type);
	return cpu_to_le32(dword);
}
/* TXWD body dword2 (AX): TID indicator, queue selection, packet size and
 * destination MAC ID.
 */
static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
		    FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) |
		    FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size) |
		    FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id);
	return cpu_to_le32(dword);
}
/* TXWD body dword3 (AX): SW sequence number, A-MPDU enable and descriptor
 * break flag.
 */
static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY3_SW_SEQ, desc_info->seq) |
		    FIELD_PREP(RTW89_TXWD_BODY3_AGG_EN, desc_info->agg_en) |
		    FIELD_PREP(RTW89_TXWD_BODY3_BK, desc_info->bk);
	return cpu_to_le32(dword);
}
/* TXWD body dword4 (AX): security IV (PN) bytes 0-1. */
static __le32 rtw89_build_txwd_body4(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
		    FIELD_PREP(RTW89_TXWD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]);
	return cpu_to_le32(dword);
}
/* TXWD body dword5 (AX): security IV (PN) bytes 2-5. */
static __le32 rtw89_build_txwd_body5(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) |
		    FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) |
		    FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) |
		    FIELD_PREP(RTW89_TXWD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]);
	return cpu_to_le32(dword);
}
/* TXWD body dword7 (AX v1): fixed-rate selection flag and data rate. */
static __le32 rtw89_build_txwd_body7_v1(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_BODY7_USE_RATE_V1, desc_info->use_rate) |
		    FIELD_PREP(RTW89_TXWD_BODY7_DATA_RATE, desc_info->data_rate);
	return cpu_to_le32(dword);
}
/* TXWD info dword0 (AX): fixed-rate flag, data rate, data-fallback disable
 * and multi-port id.
 */
static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
		    FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
		    FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
		    FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
	return cpu_to_le32(dword);
}
/* TXWD info dword0 (AX v1): fallback disable, port, HE ER capability;
 * DATA_BW_ER is always 0 here.
 */
static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
		    FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) |
		    FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) |
		    FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0);
	return cpu_to_le32(dword);
}
/* TXWD info dword1 (AX): max A-MPDU aggregation number, A-Control BSR flag
 * and the lowest retry rate for data frames.
 */
static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) |
		    FIELD_PREP(RTW89_TXWD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) |
		    FIELD_PREP(RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE,
			       desc_info->data_retry_lowest_rate);
	return cpu_to_le32(dword);
}
/* TXWD info dword2 (AX): A-MPDU density plus HW-encryption type, enable and
 * security CAM index.
 */
static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
		    FIELD_PREP(RTW89_TXWD_INFO2_SEC_TYPE, desc_info->sec_type) |
		    FIELD_PREP(RTW89_TXWD_INFO2_SEC_HW_ENC, desc_info->sec_en) |
		    FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
	return cpu_to_le32(dword);
}
/* TXWD info dword2 (AX v1): A-MPDU density, forced-key enable and security
 * CAM index (the security type lives in body dword1 on v1).
 */
static __le32 rtw89_build_txwd_info2_v1(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
		    FIELD_PREP(RTW89_TXWD_INFO2_FORCE_KEY_EN, desc_info->sec_en) |
		    FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
	return cpu_to_le32(dword);
}
/* TXWD info dword4 (AX): unconditionally enable RTS and HW RTS. */
static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
		    FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1);
	return cpu_to_le32(dword);
}
void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
			    struct rtw89_tx_desc_info *desc_info,
			    void *txdesc)
{
	/* Serialize @desc_info into the AX-format TX descriptor at
	 * @txdesc: the WD body always, plus the WD info block (placed
	 * immediately after the body) when en_wd_info is set.
	 */
	struct rtw89_txwd_body *txwd_body = (struct rtw89_txwd_body *)txdesc;
	struct rtw89_txwd_info *txwd_info;
	txwd_body->dword0 = rtw89_build_txwd_body0(desc_info);
	txwd_body->dword2 = rtw89_build_txwd_body2(desc_info);
	txwd_body->dword3 = rtw89_build_txwd_body3(desc_info);
	if (!desc_info->en_wd_info)
		return;
	/* WD info words directly follow the WD body in memory. */
	txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1);
	txwd_info->dword0 = rtw89_build_txwd_info0(desc_info);
	txwd_info->dword1 = rtw89_build_txwd_info1(desc_info);
	txwd_info->dword2 = rtw89_build_txwd_info2(desc_info);
	txwd_info->dword4 = rtw89_build_txwd_info4(desc_info);
}
EXPORT_SYMBOL(rtw89_core_fill_txdesc);
void rtw89_core_fill_txdesc_v1(struct rtw89_dev *rtwdev,
			       struct rtw89_tx_desc_info *desc_info,
			       void *txdesc)
{
	/* Serialize @desc_info into the AX v1 TX descriptor format:
	 * body dwords 0-3 and 7 always, dwords 4-5 (security IV) only
	 * when HW encryption is enabled, then the WD info block when
	 * en_wd_info is set.
	 */
	struct rtw89_txwd_body_v1 *txwd_body = (struct rtw89_txwd_body_v1 *)txdesc;
	struct rtw89_txwd_info *txwd_info;
	txwd_body->dword0 = rtw89_build_txwd_body0_v1(desc_info);
	txwd_body->dword1 = rtw89_build_txwd_body1_v1(desc_info);
	txwd_body->dword2 = rtw89_build_txwd_body2(desc_info);
	txwd_body->dword3 = rtw89_build_txwd_body3(desc_info);
	if (desc_info->sec_en) {
		txwd_body->dword4 = rtw89_build_txwd_body4(desc_info);
		txwd_body->dword5 = rtw89_build_txwd_body5(desc_info);
	}
	txwd_body->dword7 = rtw89_build_txwd_body7_v1(desc_info);
	if (!desc_info->en_wd_info)
		return;
	/* WD info words directly follow the WD body in memory. */
	txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1);
	txwd_info->dword0 = rtw89_build_txwd_info0_v1(desc_info);
	txwd_info->dword1 = rtw89_build_txwd_info1(desc_info);
	txwd_info->dword2 = rtw89_build_txwd_info2_v1(desc_info);
	txwd_info->dword4 = rtw89_build_txwd_info4(desc_info);
}
EXPORT_SYMBOL(rtw89_core_fill_txdesc_v1);
/* TXD body dword0 (BE): WP offset, WD-info enable, DMA channel, header+LLC
 * length and WD page.
 */
static __le32 rtw89_build_txwd_body0_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_BODY0_WP_OFFSET_V1, desc_info->wp_offset) |
		    FIELD_PREP(BE_TXD_BODY0_WDINFO_EN, desc_info->en_wd_info) |
		    FIELD_PREP(BE_TXD_BODY0_CH_DMA, desc_info->ch_dma) |
		    FIELD_PREP(BE_TXD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
		    FIELD_PREP(BE_TXD_BODY0_WD_PAGE, desc_info->wd_page);
	return cpu_to_le32(dword);
}
/* TXD body dword1 (BE): address-info count and security key id/type. */
static __le32 rtw89_build_txwd_body1_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_BODY1_ADDR_INFO_NUM, desc_info->addr_info_nr) |
		    FIELD_PREP(BE_TXD_BODY1_SEC_KEYID, desc_info->sec_keyid) |
		    FIELD_PREP(BE_TXD_BODY1_SEC_TYPE, desc_info->sec_type);
	return cpu_to_le32(dword);
}
/* TXD body dword2 (BE): TID indicator, queue selection, packet size,
 * aggregation enable, break flag and MAC ID (AGG/BK moved here from
 * dword3 relative to the AX layout).
 */
static __le32 rtw89_build_txwd_body2_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_BODY2_TID_IND, desc_info->tid_indicate) |
		    FIELD_PREP(BE_TXD_BODY2_QSEL, desc_info->qsel) |
		    FIELD_PREP(BE_TXD_BODY2_TXPKTSIZE, desc_info->pkt_size) |
		    FIELD_PREP(BE_TXD_BODY2_AGG_EN, desc_info->agg_en) |
		    FIELD_PREP(BE_TXD_BODY2_BK, desc_info->bk) |
		    FIELD_PREP(BE_TXD_BODY2_MACID, desc_info->mac_id);
	return cpu_to_le32(dword);
}
/* TXD body dword3 (BE): 802.11 sequence number only. */
static __le32 rtw89_build_txwd_body3_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_BODY3_WIFI_SEQ, desc_info->seq);
	return cpu_to_le32(dword);
}
/* TXD body dword4 (BE): security IV (PN) bytes 0-1. */
static __le32 rtw89_build_txwd_body4_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_BODY4_SEC_IV_L0, desc_info->sec_seq[0]) |
		    FIELD_PREP(BE_TXD_BODY4_SEC_IV_L1, desc_info->sec_seq[1]);
	return cpu_to_le32(dword);
}
/* TXD body dword5 (BE): security IV (PN) bytes 2-5. */
static __le32 rtw89_build_txwd_body5_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_BODY5_SEC_IV_H2, desc_info->sec_seq[2]) |
		    FIELD_PREP(BE_TXD_BODY5_SEC_IV_H3, desc_info->sec_seq[3]) |
		    FIELD_PREP(BE_TXD_BODY5_SEC_IV_H4, desc_info->sec_seq[4]) |
		    FIELD_PREP(BE_TXD_BODY5_SEC_IV_H5, desc_info->sec_seq[5]);
	return cpu_to_le32(dword);
}
/* TXD body dword7 (BE): fixed-rate selection, HE ER capability (BW_ER is
 * always 0) and the data rate.
 */
static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_BODY7_USERATE_SEL, desc_info->use_rate) |
		    FIELD_PREP(BE_TXD_BODY7_DATA_ER, desc_info->er_cap) |
		    FIELD_PREP(BE_TXD_BODY7_DATA_BW_ER, 0) |
		    FIELD_PREP(BE_TXD_BODY7_DATARATE, desc_info->data_rate);
	return cpu_to_le32(dword);
}
/* TXD info dword0 (BE): data-fallback disable and multi-port id. */
static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
		    FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port);
	return cpu_to_le32(dword);
}
/* TXD info dword1 (BE): max A-MPDU aggregation number, A-Control BSR flag
 * and lowest data retry rate.
 */
static __le32 rtw89_build_txwd_info1_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_INFO1_MAX_AGG_NUM, desc_info->ampdu_num) |
		    FIELD_PREP(BE_TXD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) |
		    FIELD_PREP(BE_TXD_INFO1_DATA_RTY_LOWEST_RATE,
			       desc_info->data_retry_lowest_rate);
	return cpu_to_le32(dword);
}
/* TXD info dword2 (BE): A-MPDU density, forced-key enable and security CAM
 * index.
 */
static __le32 rtw89_build_txwd_info2_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
		    FIELD_PREP(BE_TXD_INFO2_FORCE_KEY_EN, desc_info->sec_en) |
		    FIELD_PREP(BE_TXD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
	return cpu_to_le32(dword);
}
/* TXD info dword4 (BE): unconditionally enable RTS and HW RTS. */
static __le32 rtw89_build_txwd_info4_v2(struct rtw89_tx_desc_info *desc_info)
{
	u32 dword = FIELD_PREP(BE_TXD_INFO4_RTS_EN, 1) |
		    FIELD_PREP(BE_TXD_INFO4_HW_RTS_EN, 1);
	return cpu_to_le32(dword);
}
void rtw89_core_fill_txdesc_v2(struct rtw89_dev *rtwdev,
			       struct rtw89_tx_desc_info *desc_info,
			       void *txdesc)
{
	/* Serialize @desc_info into the BE (v2) TX descriptor format:
	 * body dwords 0-3 and 7 always, dwords 4-5 (security IV) only
	 * when HW encryption is enabled, then the WD info block when
	 * en_wd_info is set.
	 */
	struct rtw89_txwd_body_v2 *txwd_body = txdesc;
	struct rtw89_txwd_info_v2 *txwd_info;
	txwd_body->dword0 = rtw89_build_txwd_body0_v2(desc_info);
	txwd_body->dword1 = rtw89_build_txwd_body1_v2(desc_info);
	txwd_body->dword2 = rtw89_build_txwd_body2_v2(desc_info);
	txwd_body->dword3 = rtw89_build_txwd_body3_v2(desc_info);
	if (desc_info->sec_en) {
		txwd_body->dword4 = rtw89_build_txwd_body4_v2(desc_info);
		txwd_body->dword5 = rtw89_build_txwd_body5_v2(desc_info);
	}
	txwd_body->dword7 = rtw89_build_txwd_body7_v2(desc_info);
	if (!desc_info->en_wd_info)
		return;
	/* WD info words directly follow the WD body in memory. */
	txwd_info = (struct rtw89_txwd_info_v2 *)(txwd_body + 1);
	txwd_info->dword0 = rtw89_build_txwd_info0_v2(desc_info);
	txwd_info->dword1 = rtw89_build_txwd_info1_v2(desc_info);
	txwd_info->dword2 = rtw89_build_txwd_info2_v2(desc_info);
	txwd_info->dword4 = rtw89_build_txwd_info4_v2(desc_info);
}
EXPORT_SYMBOL(rtw89_core_fill_txdesc_v2);
  1171. static __le32 rtw89_build_txwd_fwcmd0_v1(struct rtw89_tx_desc_info *desc_info)
  1172. {
  1173. u32 dword = FIELD_PREP(AX_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
  1174. FIELD_PREP(AX_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ?
  1175. RTW89_CORE_RX_TYPE_FWDL :
  1176. RTW89_CORE_RX_TYPE_H2C);
  1177. return cpu_to_le32(dword);
  1178. }
  1179. void rtw89_core_fill_txdesc_fwcmd_v1(struct rtw89_dev *rtwdev,
  1180. struct rtw89_tx_desc_info *desc_info,
  1181. void *txdesc)
  1182. {
  1183. struct rtw89_rxdesc_short *txwd_v1 = (struct rtw89_rxdesc_short *)txdesc;
  1184. txwd_v1->dword0 = rtw89_build_txwd_fwcmd0_v1(desc_info);
  1185. }
  1186. EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v1);
  1187. static __le32 rtw89_build_txwd_fwcmd0_v2(struct rtw89_tx_desc_info *desc_info)
  1188. {
  1189. u32 dword = FIELD_PREP(BE_RXD_RPKT_LEN_MASK, desc_info->pkt_size) |
  1190. FIELD_PREP(BE_RXD_RPKT_TYPE_MASK, desc_info->fw_dl ?
  1191. RTW89_CORE_RX_TYPE_FWDL :
  1192. RTW89_CORE_RX_TYPE_H2C);
  1193. return cpu_to_le32(dword);
  1194. }
  1195. void rtw89_core_fill_txdesc_fwcmd_v2(struct rtw89_dev *rtwdev,
  1196. struct rtw89_tx_desc_info *desc_info,
  1197. void *txdesc)
  1198. {
  1199. struct rtw89_rxdesc_short_v2 *txwd_v2 = (struct rtw89_rxdesc_short_v2 *)txdesc;
  1200. txwd_v2->dword0 = rtw89_build_txwd_fwcmd0_v2(desc_info);
  1201. }
  1202. EXPORT_SYMBOL(rtw89_core_fill_txdesc_fwcmd_v2);
/* Parse the MAC info header prepended to a PPDU status packet and locate
 * the PHY status payload that follows it.  On success, phy_ppdu->buf/len
 * point at the PHY status area inside @skb.  Returns -EINVAL if the
 * reported user count is out of range.
 */
static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
					  struct sk_buff *skb,
					  struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	const struct rtw89_rxinfo *rxinfo = (const struct rtw89_rxinfo *)skb->data;
	bool rx_cnt_valid = false;
	u8 plcp_size = 0;
	u8 usr_num = 0;
	u8 *phy_sts;

	rx_cnt_valid = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_RX_CNT_VLD);
	/* PLCP length field counts 8-byte units */
	plcp_size = le32_get_bits(rxinfo->w1, RTW89_RXINFO_W1_PLCP_LEN) << 3;
	usr_num = le32_get_bits(rxinfo->w0, RTW89_RXINFO_W0_USR_NUM);
	if (usr_num > RTW89_PPDU_MAX_USR) {
		rtw89_warn(rtwdev, "Invalid user number in mac info\n");
		return -EINVAL;
	}

	/* skip fixed MAC info and one per-user record each */
	phy_sts = skb->data + RTW89_PPDU_MAC_INFO_SIZE;
	phy_sts += usr_num * RTW89_PPDU_MAC_INFO_USR_SIZE;
	/* 8-byte alignment */
	if (usr_num & BIT(0))
		phy_sts += RTW89_PPDU_MAC_INFO_USR_SIZE;
	/* optional RX counter block precedes the PLCP */
	if (rx_cnt_valid)
		phy_sts += RTW89_PPDU_MAC_RX_CNT_SIZE;
	phy_sts += plcp_size;

	/* NOTE(review): phy_sts is not checked against skb->len here —
	 * presumably the hardware guarantees the layout fits; confirm.
	 */
	phy_ppdu->buf = phy_sts;
	phy_ppdu->len = skb->data + skb->len - phy_sts;

	return 0;
}
/* Per-station iterator: fold a received PPDU's RSSI/SNR/EVM samples into
 * the moving averages of the station the PPDU belongs to.  Stations with
 * a different mac_id, or PPDUs not addressed to us, are skipped.
 */
static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
						struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
	struct rtw89_dev *rtwdev = rtwsta->rtwdev;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
	u8 ant_pos = U8_MAX;	/* U8_MAX: no specific diversity antenna selected */
	u8 evm_pos = 0;
	int i;

	if (rtwsta->mac_id != phy_ppdu->mac_id || !phy_ppdu->to_self)
		return;

	/* with antenna diversity, attribute samples to the active antenna */
	if (hal->ant_diversity && hal->antenna_rx) {
		ant_pos = __ffs(hal->antenna_rx);
		evm_pos = ant_pos;
	}

	ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);

	if (ant_pos < ant_num) {
		/* single active antenna: hardware reports it on path 0 */
		ewma_rssi_add(&rtwsta->rssi[ant_pos], phy_ppdu->rssi[0]);
	} else {
		for (i = 0; i < rtwdev->chip->rf_path_num; i++)
			ewma_rssi_add(&rtwsta->rssi[i], phy_ppdu->rssi[i]);
	}

	/* SNR/EVM are only reported for OFDM PPDUs (ofdm.has set) */
	if (phy_ppdu->ofdm.has) {
		ewma_snr_add(&rtwsta->avg_snr, phy_ppdu->ofdm.avg_snr);
		ewma_evm_add(&rtwsta->evm_min[evm_pos], phy_ppdu->ofdm.evm_min);
		ewma_evm_add(&rtwsta->evm_max[evm_pos], phy_ppdu->ofdm.evm_max);
	}
}
#define VAR_LEN 0xff		/* table sentinel: size comes from the IE header */
#define VAR_LEN_UNIT 8		/* IE header length field counts 8-byte units */
/* Return the byte length of one PHY status IE.  Fixed-size IE types are
 * looked up in a table; VAR_LEN entries carry their own length in the
 * IE header.  The table has 32 entries — assumes the TYPE field is at
 * most 5 bits wide (no bounds check here) — TODO confirm.
 */
static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev,
					    const struct rtw89_phy_sts_iehdr *iehdr)
{
	static const u8 physts_ie_len_tab[32] = {
		16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
		VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN,
		VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
	};
	u16 ie_len;
	u8 ie;

	ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
	if (physts_ie_len_tab[ie] != VAR_LEN)
		ie_len = physts_ie_len_tab[ie];
	else
		ie_len = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_LEN) * VAR_LEN_UNIT;

	return ie_len;
}
/* Parse the common-OFDM PHY status IE: channel index, average SNR,
 * min/max EVM, and CFO which is handed to the CFO tracking logic.
 */
static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
					     const struct rtw89_phy_sts_iehdr *iehdr,
					     struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	const struct rtw89_phy_sts_ie0 *ie = (const struct rtw89_phy_sts_ie0 *)iehdr;
	s16 cfo;
	u32 t;

	phy_ppdu->chan_idx = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_CH_IDX);

	/* remaining fields only apply to OFDM rates and frames to us */
	if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
		return;

	if (!phy_ppdu->to_self)
		return;

	phy_ppdu->ofdm.avg_snr = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_AVG_SNR);
	phy_ppdu->ofdm.evm_max = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MAX);
	phy_ppdu->ofdm.evm_min = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_EVM_MIN);
	phy_ppdu->ofdm.has = true;

	/* sign conversion for S(12,2): 12-bit two's-complement field */
	if (rtwdev->chip->cfo_src_fd) {
		/* chip reports CFO measured in the frequency domain */
		t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_FD_CFO);
		cfo = sign_extend32(t, 11);
	} else {
		/* chip reports CFO measured from the preamble */
		t = le32_get_bits(ie->w1, RTW89_PHY_STS_IE01_W1_PREMB_CFO);
		cfo = sign_extend32(t, 11);
	}

	rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
}
  1306. static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev,
  1307. const struct rtw89_phy_sts_iehdr *iehdr,
  1308. struct rtw89_rx_phy_ppdu *phy_ppdu)
  1309. {
  1310. u8 ie;
  1311. ie = le32_get_bits(iehdr->w0, RTW89_PHY_STS_IEHDR_TYPE);
  1312. switch (ie) {
  1313. case RTW89_PHYSTS_IE01_CMN_OFDM:
  1314. rtw89_core_parse_phy_status_ie01(rtwdev, iehdr, phy_ppdu);
  1315. break;
  1316. default:
  1317. break;
  1318. }
  1319. return 0;
  1320. }
  1321. static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
  1322. {
  1323. const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
  1324. u8 *rssi = phy_ppdu->rssi;
  1325. phy_ppdu->ie = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_IE_MAP);
  1326. phy_ppdu->rssi_avg = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_RSSI_AVG);
  1327. rssi[RF_PATH_A] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_A);
  1328. rssi[RF_PATH_B] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_B);
  1329. rssi[RF_PATH_C] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_C);
  1330. rssi[RF_PATH_D] = le32_get_bits(hdr->w1, RTW89_PHY_STS_HDR_W1_RSSI_D);
  1331. }
  1332. static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
  1333. struct rtw89_rx_phy_ppdu *phy_ppdu)
  1334. {
  1335. const struct rtw89_phy_sts_hdr *hdr = phy_ppdu->buf;
  1336. u32 len_from_header;
  1337. len_from_header = le32_get_bits(hdr->w0, RTW89_PHY_STS_HDR_W0_LEN) << 3;
  1338. if (len_from_header != phy_ppdu->len) {
  1339. rtw89_debug(rtwdev, RTW89_DBG_UNEXP, "phy ppdu len mismatch\n");
  1340. return -EINVAL;
  1341. }
  1342. rtw89_core_update_phy_ppdu(phy_ppdu);
  1343. return 0;
  1344. }
/* Walk the chain of PHY status IEs following the header, dispatching each
 * one to rtw89_core_process_phy_status_ie().  Returns -EINVAL for invalid
 * reports: bad IE map, zero-length IE, or an IE overrunning the buffer.
 */
static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
				       struct rtw89_rx_phy_ppdu *phy_ppdu)
{
	u16 ie_len;
	void *pos, *end;

	/* mark invalid reports and bypass them */
	if (phy_ppdu->ie < RTW89_CCK_PKT)
		return -EINVAL;

	pos = phy_ppdu->buf + PHY_STS_HDR_LEN;
	end = phy_ppdu->buf + phy_ppdu->len;
	while (pos < end) {
		const struct rtw89_phy_sts_iehdr *iehdr = pos;

		ie_len = rtw89_core_get_phy_status_ie_len(rtwdev, iehdr);
		rtw89_core_process_phy_status_ie(rtwdev, iehdr, phy_ppdu);
		pos += ie_len;
		/* checked after advancing: a zero ie_len would loop forever,
		 * and a too-long IE pushes pos past end
		 */
		if (pos > end || ie_len == 0) {
			rtw89_debug(rtwdev, RTW89_DBG_TXRX,
				    "phy status parse failed\n");
			return -EINVAL;
		}
	}

	rtw89_phy_antdiv_parse(rtwdev, phy_ppdu);

	return 0;
}
  1369. static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev,
  1370. struct rtw89_rx_phy_ppdu *phy_ppdu)
  1371. {
  1372. int ret;
  1373. ret = rtw89_core_rx_parse_phy_sts(rtwdev, phy_ppdu);
  1374. if (ret)
  1375. rtw89_debug(rtwdev, RTW89_DBG_TXRX, "parse phy sts failed\n");
  1376. else
  1377. phy_ppdu->valid = true;
  1378. ieee80211_iterate_stations_atomic(rtwdev->hw,
  1379. rtw89_core_rx_process_phy_ppdu_iter,
  1380. phy_ppdu);
  1381. }
  1382. static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev,
  1383. const struct rtw89_rx_desc_info *desc_info,
  1384. bool rx_status)
  1385. {
  1386. switch (desc_info->gi_ltf) {
  1387. case RTW89_GILTF_SGI_4XHE08:
  1388. case RTW89_GILTF_2XHE08:
  1389. case RTW89_GILTF_1XHE08:
  1390. return NL80211_RATE_INFO_HE_GI_0_8;
  1391. case RTW89_GILTF_2XHE16:
  1392. case RTW89_GILTF_1XHE16:
  1393. return NL80211_RATE_INFO_HE_GI_1_6;
  1394. case RTW89_GILTF_LGI_4XHE32:
  1395. return NL80211_RATE_INFO_HE_GI_3_2;
  1396. default:
  1397. rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info->gi_ltf);
  1398. return rx_status ? NL80211_RATE_INFO_HE_GI_3_2 : U8_MAX;
  1399. }
  1400. }
/* Decide whether a queued RX frame belongs to the PPDU status being
 * processed, by comparing PPDU counter, rate index, HE GI and bandwidth.
 * Note: queued frames still carry the hardware rate index because
 * rtw89_core_hw_to_sband_rate() only runs later, on delivery.
 */
static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
				     struct rtw89_rx_desc_info *desc_info,
				     struct ieee80211_rx_status *status)
{
	u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
	u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf;
	u16 data_rate;
	bool ret;

	data_rate = desc_info->data_rate;
	data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
	if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
		rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate);
		/* rate_idx is still hardware value here */
	} else if (data_rate_mode == DATA_RATE_MODE_HT) {
		rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate);
	} else if (data_rate_mode == DATA_RATE_MODE_VHT) {
		rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
	} else if (data_rate_mode == DATA_RATE_MODE_HE) {
		rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
	} else {
		/* rate_idx keeps the MASKBYTE0 default and likely mismatches */
		rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
	}

	bw = rtw89_hw_to_rate_info_bw(desc_info->bw);
	gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false);
	ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
	      status->rate_idx == rate_idx &&
	      status->he_gi == gi_ltf &&
	      status->bw == bw;

	return ret;
}
/* Context handed to rtw89_vif_rx_stats_iter() for each received frame. */
struct rtw89_vif_rx_stats_iter_data {
	struct rtw89_dev *rtwdev;
	struct rtw89_rx_phy_ppdu *phy_ppdu;	/* PPDU status of the frame */
	struct rtw89_rx_desc_info *desc_info;	/* decoded RX descriptor */
	struct sk_buff *skb;			/* the received frame itself */
	const u8 *bssid;			/* BSSID extracted from the frame header */
};
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
/* Account received trigger frames (basic / MU-BAR types) addressed to our
 * station AID.  On kernels exposing the UL BW mask (>= 6.7) additionally
 * enable TX power-diff handling when triggered on a small RU within a
 * 160/80+80 MHz UL allocation.
 */
static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev,
				      struct ieee80211_vif *vif,
				      struct sk_buff *skb)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct ieee80211_trigger *tf = (struct ieee80211_trigger *)skb->data;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 7, 0)
	u8 *pos, *end, type, tf_bw;
#else
	u8 *pos, *end, type;
#endif
	u16 aid, tf_rua;

	/* only an associated station cares about triggers from its AP */
	if (!ether_addr_equal(vif->bss_conf.bssid, tf->ta) ||
	    rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION ||
	    rtwvif->net_type == RTW89_NET_TYPE_NO_LINK)
		return;

	type = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_TYPE_MASK);
	if (type != IEEE80211_TRIGGER_TYPE_BASIC && type != IEEE80211_TRIGGER_TYPE_MU_BAR)
		return;

	end = (u8 *)tf + skb->len;
	pos = tf->variable;

	/* walk the fixed-size per-user info records */
	while (end - pos >= RTW89_TF_BASIC_USER_INFO_SZ) {
		aid = RTW89_GET_TF_USER_INFO_AID12(pos);
		tf_rua = RTW89_GET_TF_USER_INFO_RUA(pos);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 7, 0)
		tf_bw = le64_get_bits(tf->common_info, IEEE80211_TRIGGER_ULBW_MASK);
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "[TF] aid: %d, ul_mcs: %d, rua: %d, bw: %d\n",
			    aid, RTW89_GET_TF_USER_INFO_UL_MCS(pos),
			    tf_rua, tf_bw);
#endif
		/* the padding AID terminates the user info list */
		if (aid == RTW89_TF_PAD)
			break;

		if (aid == vif->cfg.aid) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 7, 0)
			enum nl80211_he_ru_alloc rua = rtw89_he_rua_to_ru_alloc(tf_rua >> 1);
#endif

			rtwvif->stats.rx_tf_acc++;
			rtwdev->stats.rx_tf_acc++;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 7, 0)
			if (tf_bw == IEEE80211_TRIGGER_ULBW_160_80P80MHZ &&
			    rua <= NL80211_RATE_INFO_HE_RU_ALLOC_106)
				rtwvif->pwr_diff_en = true;
			break;
#endif
		}

		pos += RTW89_TF_BASIC_USER_INFO_SZ;
	}
}
#endif
  1489. static void rtw89_cancel_6ghz_probe_work(struct work_struct *work)
  1490. {
  1491. struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
  1492. cancel_6ghz_probe_work);
  1493. struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
  1494. struct rtw89_pktofld_info *info;
  1495. mutex_lock(&rtwdev->mutex);
  1496. if (!rtwdev->scanning)
  1497. goto out;
  1498. list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
  1499. if (!info->cancel || !test_bit(info->id, rtwdev->pkt_offload))
  1500. continue;
  1501. rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
  1502. /* Don't delete/free info from pkt_list at this moment. Let it
  1503. * be deleted/freed in rtw89_release_pkt_list() after scanning,
  1504. * since if during scanning, pkt_list is accessed in bottom half.
  1505. */
  1506. }
  1507. out:
  1508. mutex_unlock(&rtwdev->mutex);
  1509. }
/* While scanning 6GHz, a received beacon/probe-response may make pending
 * offloaded probe requests unnecessary: mark entries matching the frame's
 * BSSID or SSID for cancellation and queue the work that deletes them.
 */
static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev,
					    struct sk_buff *skb)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
	struct rtw89_pktofld_info *info;
	const u8 *ies = mgmt->u.beacon.variable, *ssid_ie;
	bool queue_work = false;

	if (rx_status->band != NL80211_BAND_6GHZ)
		return;

	ssid_ie = cfg80211_find_ie(WLAN_EID_SSID, ies, skb->len);

	list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) {
		/* direct BSSID match */
		if (ether_addr_equal(info->bssid, mgmt->bssid)) {
			info->cancel = true;
			queue_work = true;
			continue;
		}

		/* otherwise match by SSID; skip entries without one */
		if (!ssid_ie || ssid_ie[1] != info->ssid_len || info->ssid_len == 0)
			continue;

		if (memcmp(&ssid_ie[2], info->ssid, info->ssid_len) == 0) {
			info->cancel = true;
			queue_work = true;
		}
	}

	/* deletion needs rtwdev->mutex, so defer to work context */
	if (queue_work)
		ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work);
}
/* Per-vif iterator for each received frame: cancel redundant 6GHz probe
 * offloads during scan, account trigger frames, beacons and per-rate RX
 * counters, and accumulate vif traffic statistics.
 */
static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_vif_rx_stats_iter_data *iter_data = data;
	struct rtw89_dev *rtwdev = iter_data->rtwdev;
	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
	struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
	struct sk_buff *skb = iter_data->skb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct rtw89_rx_phy_ppdu *phy_ppdu = iter_data->phy_ppdu;
	const u8 *bssid = iter_data->bssid;

	if (rtwdev->scanning &&
	    (ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)))
		rtw89_core_cancel_6ghz_probe_tx(rtwdev, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
	if (!vif->bss_conf.bssid)
		return;

	if (ieee80211_is_trigger(hdr->frame_control)) {
		rtw89_stats_trigger_frame(rtwdev, vif, skb);
		return;
	}
#endif

	/* everything below only concerns frames from our own BSS */
	if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
		return;

	if (ieee80211_is_beacon(hdr->frame_control)) {
		if (vif->type == NL80211_IFTYPE_STATION)
			rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
		pkt_stat->beacon_nr++;
	}

	/* rate statistics only for frames addressed to this vif */
	if (!ether_addr_equal(vif->addr, hdr->addr1))
		return;

	if (desc_info->data_rate < RTW89_HW_RATE_NR)
		pkt_stat->rx_rate_cnt[desc_info->data_rate]++;

	rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, false);
}
  1575. static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
  1576. struct rtw89_rx_phy_ppdu *phy_ppdu,
  1577. struct rtw89_rx_desc_info *desc_info,
  1578. struct sk_buff *skb)
  1579. {
  1580. struct rtw89_vif_rx_stats_iter_data iter_data;
  1581. rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, false);
  1582. iter_data.rtwdev = rtwdev;
  1583. iter_data.phy_ppdu = phy_ppdu;
  1584. iter_data.desc_info = desc_info;
  1585. iter_data.skb = skb;
  1586. iter_data.bssid = get_hdr_bssid((struct ieee80211_hdr *)skb->data);
  1587. rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data);
  1588. }
  1589. static void rtw89_correct_cck_chan(struct rtw89_dev *rtwdev,
  1590. struct ieee80211_rx_status *status)
  1591. {
  1592. const struct rtw89_chan_rcd *rcd =
  1593. rtw89_chan_rcd_get(rtwdev, RTW89_SUB_ENTITY_0);
  1594. u16 chan = rcd->prev_primary_channel;
  1595. u8 band = rtw89_hw_to_nl80211_band(rcd->prev_band_type);
  1596. if (status->band != NL80211_BAND_2GHZ &&
  1597. status->encoding == RX_ENC_LEGACY &&
  1598. status->rate_idx < RTW89_HW_RATE_OFDM6) {
  1599. status->freq = ieee80211_channel_to_frequency(chan, band);
  1600. status->band = band;
  1601. }
  1602. }
  1603. static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
  1604. {
  1605. if (rx_status->band == NL80211_BAND_2GHZ ||
  1606. rx_status->encoding != RX_ENC_LEGACY)
  1607. return;
  1608. /* Some control frames' freq(ACKs in this case) are reported wrong due
  1609. * to FW notify timing, set to lowest rate to prevent overflow.
  1610. */
  1611. if (rx_status->rate_idx < RTW89_HW_RATE_OFDM6) {
  1612. rx_status->rate_idx = 0;
  1613. return;
  1614. }
  1615. /* No 4 CCK rates for non-2G */
  1616. rx_status->rate_idx -= 4;
  1617. }
  1618. static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
  1619. struct sk_buff *skb,
  1620. struct ieee80211_rx_status *rx_status)
  1621. {
  1622. static const struct ieee80211_radiotap_he known_he = {
  1623. .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
  1624. IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
  1625. .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
  1626. };
  1627. struct ieee80211_radiotap_he *he;
  1628. if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR))
  1629. return;
  1630. if (rx_status->encoding == RX_ENC_HE) {
  1631. rx_status->flag |= RX_FLAG_RADIOTAP_HE;
  1632. he = skb_push(skb, sizeof(*he));
  1633. *he = known_he;
  1634. }
  1635. }
/* Final hand-off of one RX frame to mac80211: fix up the rate index,
 * update statistics and radiotap, then indicate the frame — via NAPI
 * when it is scheduled, otherwise directly (low power / thread context),
 * with BHs disabled around the indication.
 */
static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
				      struct rtw89_rx_phy_ppdu *phy_ppdu,
				      struct rtw89_rx_desc_info *desc_info,
				      struct sk_buff *skb_ppdu,
				      struct ieee80211_rx_status *rx_status)
{
	struct napi_struct *napi = &rtwdev->napi;

	/* In low power mode, napi isn't scheduled. Receive it to netif. */
	if (unlikely(!test_bit(NAPI_STATE_SCHED, &napi->state)))
		napi = NULL;

	rtw89_core_hw_to_sband_rate(rx_status);
	rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
	rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
	/* In low power mode, it does RX in thread context. */
	local_bh_disable();
	ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, napi);
	local_bh_enable();

	rtwdev->napi_budget_countdown--;
}
/* Flush frames queued while waiting for their PPDU status.  Frames whose
 * descriptor matches this PPDU get PPDU-derived signal info attached via
 * rtw89_chip_query_ppdu(); every queued frame is then delivered.
 */
static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
				      struct rtw89_rx_phy_ppdu *phy_ppdu,
				      struct rtw89_rx_desc_info *desc_info,
				      struct sk_buff *skb)
{
	u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
	int curr = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band];
	struct sk_buff *skb_ppdu = NULL, *tmp;
	struct ieee80211_rx_status *rx_status;

	/* NOTE(review): presumably a counter above RTW89_MAX_PPDU_CNT means
	 * no PPDU is being tracked for this band — confirm against where
	 * curr_rx_ppdu_cnt is assigned.
	 */
	if (curr > RTW89_MAX_PPDU_CNT)
		return;

	skb_queue_walk_safe(&rtwdev->ppdu_sts.rx_queue[band], skb_ppdu, tmp) {
		skb_unlink(skb_ppdu, &rtwdev->ppdu_sts.rx_queue[band]);
		rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
		if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
			rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
		rtw89_correct_cck_chan(rtwdev, rx_status);
		rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status);
	}
}
  1675. static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev,
  1676. struct rtw89_rx_desc_info *desc_info,
  1677. struct sk_buff *skb)
  1678. {
  1679. struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false,
  1680. .len = skb->len,
  1681. .to_self = desc_info->addr1_match,
  1682. .rate = desc_info->data_rate,
  1683. .mac_id = desc_info->mac_id};
  1684. int ret;
  1685. if (desc_info->mac_info_valid)
  1686. rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu);
  1687. ret = rtw89_core_rx_process_phy_ppdu(rtwdev, &phy_ppdu);
  1688. if (ret)
  1689. rtw89_debug(rtwdev, RTW89_DBG_TXRX, "process ppdu failed\n");
  1690. rtw89_core_rx_process_phy_sts(rtwdev, &phy_ppdu);
  1691. rtw89_core_rx_pending_skb(rtwdev, &phy_ppdu, desc_info, skb);
  1692. dev_kfree_skb_any(skb);
  1693. }
  1694. static void rtw89_core_rx_process_report(struct rtw89_dev *rtwdev,
  1695. struct rtw89_rx_desc_info *desc_info,
  1696. struct sk_buff *skb)
  1697. {
  1698. switch (desc_info->pkt_type) {
  1699. case RTW89_CORE_RX_TYPE_C2H:
  1700. rtw89_fw_c2h_irqsafe(rtwdev, skb);
  1701. break;
  1702. case RTW89_CORE_RX_TYPE_PPDU_STAT:
  1703. rtw89_core_rx_process_ppdu_sts(rtwdev, desc_info, skb);
  1704. break;
  1705. default:
  1706. rtw89_debug(rtwdev, RTW89_DBG_TXRX, "unhandled pkt_type=%d\n",
  1707. desc_info->pkt_type);
  1708. dev_kfree_skb_any(skb);
  1709. break;
  1710. }
  1711. }
/* Decode a v1 (AX chips) RX descriptor located at data + data_offset into
 * desc_info, including the offset where the frame payload starts.  The
 * trailing long-descriptor fields are only parsed when present.
 */
void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
			     struct rtw89_rx_desc_info *desc_info,
			     u8 *data, u32 data_offset)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_rxdesc_short *rxd_s;
	struct rtw89_rxdesc_long *rxd_l;
	u8 shift_len, drv_info_len;

	rxd_s = (struct rtw89_rxdesc_short *)(data + data_offset);
	desc_info->pkt_size = le32_get_bits(rxd_s->dword0, AX_RXD_RPKT_LEN_MASK);
	desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, AX_RXD_DRV_INFO_SIZE_MASK);
	desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, AX_RXD_LONG_RXD);
	desc_info->pkt_type = le32_get_bits(rxd_s->dword0, AX_RXD_RPKT_TYPE_MASK);
	desc_info->mac_info_valid = le32_get_bits(rxd_s->dword0, AX_RXD_MAC_INFO_VLD);
	/* 8852C uses a different BW field encoding */
	if (chip->chip_id == RTL8852C)
		desc_info->bw = le32_get_bits(rxd_s->dword1, AX_RXD_BW_v1_MASK);
	else
		desc_info->bw = le32_get_bits(rxd_s->dword1, AX_RXD_BW_MASK);
	desc_info->data_rate = le32_get_bits(rxd_s->dword1, AX_RXD_RX_DATARATE_MASK);
	desc_info->gi_ltf = le32_get_bits(rxd_s->dword1, AX_RXD_RX_GI_LTF_MASK);
	desc_info->user_id = le32_get_bits(rxd_s->dword1, AX_RXD_USER_ID_MASK);
	desc_info->sr_en = le32_get_bits(rxd_s->dword1, AX_RXD_SR_EN);
	desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword1, AX_RXD_PPDU_CNT_MASK);
	desc_info->ppdu_type = le32_get_bits(rxd_s->dword1, AX_RXD_PPDU_TYPE_MASK);
	desc_info->free_run_cnt = le32_get_bits(rxd_s->dword2, AX_RXD_FREERUN_CNT_MASK);
	desc_info->icv_err = le32_get_bits(rxd_s->dword3, AX_RXD_ICV_ERR);
	desc_info->crc32_err = le32_get_bits(rxd_s->dword3, AX_RXD_CRC32_ERR);
	desc_info->hw_dec = le32_get_bits(rxd_s->dword3, AX_RXD_HW_DEC);
	desc_info->sw_dec = le32_get_bits(rxd_s->dword3, AX_RXD_SW_DEC);
	desc_info->addr1_match = le32_get_bits(rxd_s->dword3, AX_RXD_A1_MATCH);

	/* NOTE(review): desc_info->shift is read here but never extracted
	 * from the v1 descriptor above — assumes the caller pre-initialized
	 * (e.g. zeroed) desc_info; confirm.
	 */
	shift_len = desc_info->shift << 1; /* 2-byte unit */
	drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */
	desc_info->offset = data_offset + shift_len + drv_info_len;
	if (desc_info->long_rxdesc)
		desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long);
	else
		desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short);
	desc_info->ready = true;

	if (!desc_info->long_rxdesc)
		return;

	/* long descriptor: additional addressing/security fields */
	rxd_l = (struct rtw89_rxdesc_long *)(data + data_offset);
	desc_info->frame_type = le32_get_bits(rxd_l->dword4, AX_RXD_TYPE_MASK);
	desc_info->addr_cam_valid = le32_get_bits(rxd_l->dword5, AX_RXD_ADDR_CAM_VLD);
	desc_info->addr_cam_id = le32_get_bits(rxd_l->dword5, AX_RXD_ADDR_CAM_MASK);
	desc_info->sec_cam_id = le32_get_bits(rxd_l->dword5, AX_RXD_SEC_CAM_IDX_MASK);
	desc_info->mac_id = le32_get_bits(rxd_l->dword5, AX_RXD_MAC_ID_MASK);
	desc_info->rx_pl_id = le32_get_bits(rxd_l->dword5, AX_RXD_RX_PL_ID_MASK);
}
EXPORT_SYMBOL(rtw89_core_query_rxdesc);
/* Decode a v2 (BE chips) RX descriptor located at data + data_offset into
 * desc_info, including the offset where the frame payload starts.  The
 * v2 layout adds PHY report and header-conversion areas before the
 * payload; long-descriptor fields are only parsed when present.
 */
void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
				struct rtw89_rx_desc_info *desc_info,
				u8 *data, u32 data_offset)
{
	struct rtw89_rxdesc_short_v2 *rxd_s;
	struct rtw89_rxdesc_long_v2 *rxd_l;
	u16 shift_len, drv_info_len, phy_rtp_len, hdr_cnv_len;

	rxd_s = (struct rtw89_rxdesc_short_v2 *)(data + data_offset);
	desc_info->pkt_size = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_LEN_MASK);
	desc_info->drv_info_size = le32_get_bits(rxd_s->dword0, BE_RXD_DRV_INFO_SZ_MASK);
	desc_info->phy_rpt_size = le32_get_bits(rxd_s->dword0, BE_RXD_PHY_RPT_SZ_MASK);
	desc_info->hdr_cnv_size = le32_get_bits(rxd_s->dword0, BE_RXD_HDR_CNV_SZ_MASK);
	desc_info->shift = le32_get_bits(rxd_s->dword0, BE_RXD_SHIFT_MASK);
	desc_info->long_rxdesc = le32_get_bits(rxd_s->dword0, BE_RXD_LONG_RXD);
	desc_info->pkt_type = le32_get_bits(rxd_s->dword0, BE_RXD_RPKT_TYPE_MASK);
	/* v2 has no explicit MAC_INFO_VLD bit; PPDU status always carries it */
	if (desc_info->pkt_type == RTW89_CORE_RX_TYPE_PPDU_STAT)
		desc_info->mac_info_valid = true;

	desc_info->frame_type = le32_get_bits(rxd_s->dword2, BE_RXD_TYPE_MASK);
	desc_info->mac_id = le32_get_bits(rxd_s->dword2, BE_RXD_MAC_ID_MASK);
	desc_info->addr_cam_valid = le32_get_bits(rxd_s->dword2, BE_RXD_ADDR_CAM_VLD);

	desc_info->icv_err = le32_get_bits(rxd_s->dword3, BE_RXD_ICV_ERR);
	desc_info->crc32_err = le32_get_bits(rxd_s->dword3, BE_RXD_CRC32_ERR);
	desc_info->hw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_HW_DEC);
	desc_info->sw_dec = le32_get_bits(rxd_s->dword3, BE_RXD_SW_DEC);
	desc_info->addr1_match = le32_get_bits(rxd_s->dword3, BE_RXD_A1_MATCH);

	desc_info->bw = le32_get_bits(rxd_s->dword4, BE_RXD_BW_MASK);
	desc_info->data_rate = le32_get_bits(rxd_s->dword4, BE_RXD_RX_DATARATE_MASK);
	desc_info->gi_ltf = le32_get_bits(rxd_s->dword4, BE_RXD_RX_GI_LTF_MASK);
	desc_info->ppdu_cnt = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_CNT_MASK);
	desc_info->ppdu_type = le32_get_bits(rxd_s->dword4, BE_RXD_PPDU_TYPE_MASK);

	desc_info->free_run_cnt = le32_to_cpu(rxd_s->dword5);

	shift_len = desc_info->shift << 1; /* 2-byte unit */
	drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */
	phy_rtp_len = desc_info->phy_rpt_size << 3; /* 8-byte unit */
	hdr_cnv_len = desc_info->hdr_cnv_size << 4; /* 16-byte unit */
	desc_info->offset = data_offset + shift_len + drv_info_len +
			    phy_rtp_len + hdr_cnv_len;

	if (desc_info->long_rxdesc)
		desc_info->rxd_len = sizeof(struct rtw89_rxdesc_long_v2);
	else
		desc_info->rxd_len = sizeof(struct rtw89_rxdesc_short_v2);
	desc_info->ready = true;

	if (!desc_info->long_rxdesc)
		return;

	/* long descriptor: additional addressing/security fields */
	rxd_l = (struct rtw89_rxdesc_long_v2 *)(data + data_offset);
	desc_info->sr_en = le32_get_bits(rxd_l->dword6, BE_RXD_SR_EN);
	desc_info->user_id = le32_get_bits(rxd_l->dword6, BE_RXD_USER_ID_MASK);
	desc_info->addr_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_ADDR_CAM_MASK);
	desc_info->sec_cam_id = le32_get_bits(rxd_l->dword6, BE_RXD_SEC_CAM_IDX_MASK);
	desc_info->rx_pl_id = le32_get_bits(rxd_l->dword7, BE_RXD_RX_PL_ID_MASK);
}
EXPORT_SYMBOL(rtw89_core_query_rxdesc_v2);
/* Context for rtw89_core_stats_sta_rx_status_iter(): identifies the
 * station (by mac_id) whose cached RX status should be updated.
 */
struct rtw89_core_iter_rx_status {
	struct rtw89_dev *rtwdev;
	struct ieee80211_rx_status *rx_status;	/* status to cache in the station */
	struct rtw89_rx_desc_info *desc_info;	/* source of the hardware rate */
	u8 mac_id;				/* target station's mac_id */
};
  1819. static
  1820. void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta)
  1821. {
  1822. struct rtw89_core_iter_rx_status *iter_data =
  1823. (struct rtw89_core_iter_rx_status *)data;
  1824. struct ieee80211_rx_status *rx_status = iter_data->rx_status;
  1825. struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
  1826. struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
  1827. u8 mac_id = iter_data->mac_id;
  1828. if (mac_id != rtwsta->mac_id)
  1829. return;
  1830. rtwsta->rx_status = *rx_status;
  1831. rtwsta->rx_hw_rate = desc_info->data_rate;
  1832. }
  1833. static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev,
  1834. struct rtw89_rx_desc_info *desc_info,
  1835. struct ieee80211_rx_status *rx_status)
  1836. {
  1837. struct rtw89_core_iter_rx_status iter_data;
  1838. if (!desc_info->addr1_match || !desc_info->long_rxdesc)
  1839. return;
  1840. if (desc_info->frame_type != RTW89_RX_TYPE_DATA)
  1841. return;
  1842. iter_data.rtwdev = rtwdev;
  1843. iter_data.rx_status = rx_status;
  1844. iter_data.desc_info = desc_info;
  1845. iter_data.mac_id = desc_info->mac_id;
  1846. ieee80211_iterate_stations_atomic(rtwdev->hw,
  1847. rtw89_core_stats_sta_rx_status_iter,
  1848. &iter_data);
  1849. }
/* Translate a decoded RX descriptor into mac80211's ieee80211_rx_status:
 * frequency/band, FCS/decryption flags, bandwidth, encoding, rate index,
 * NSS, GI and mactime.
 */
static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
					struct rtw89_rx_desc_info *desc_info,
					struct ieee80211_rx_status *rx_status)
{
	const struct cfg80211_chan_def *chandef =
		rtw89_chandef_get(rtwdev, RTW89_SUB_ENTITY_0);
	u16 data_rate;
	u8 data_rate_mode;

	/* currently using single PHY */
	rx_status->freq = chandef->chan->center_freq;
	rx_status->band = chandef->chan->band;

	/* with FW scan offload the firmware hops channels itself; report
	 * the scan channel instead of the configured one
	 */
	if (rtwdev->scanning &&
	    RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
		const struct rtw89_chan *cur = rtw89_scan_chan_get(rtwdev);
		u8 chan = cur->primary_channel;
		u8 band = cur->band_type;
		enum nl80211_band nl_band;

		nl_band = rtw89_hw_to_nl80211_band(band);
		rx_status->freq = ieee80211_channel_to_frequency(chan, nl_band);
		rx_status->band = nl_band;
	}

	if (desc_info->icv_err || desc_info->crc32_err)
		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (desc_info->hw_dec &&
	    !(desc_info->sw_dec || desc_info->icv_err))
		rx_status->flag |= RX_FLAG_DECRYPTED;

	rx_status->bw = rtw89_hw_to_rate_info_bw(desc_info->bw);

	data_rate = desc_info->data_rate;
	data_rate_mode = rtw89_get_data_rate_mode(rtwdev, data_rate);
	if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
		rx_status->encoding = RX_ENC_LEGACY;
		rx_status->rate_idx = rtw89_get_data_not_ht_idx(rtwdev, data_rate);
		/* convert rate_idx after we get the correct band */
	} else if (data_rate_mode == DATA_RATE_MODE_HT) {
		rx_status->encoding = RX_ENC_HT;
		rx_status->rate_idx = rtw89_get_data_ht_mcs(rtwdev, data_rate);
		if (desc_info->gi_ltf)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
	} else if (data_rate_mode == DATA_RATE_MODE_VHT) {
		rx_status->encoding = RX_ENC_VHT;
		rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
		rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
		if (desc_info->gi_ltf)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
	} else if (data_rate_mode == DATA_RATE_MODE_HE) {
		rx_status->encoding = RX_ENC_HE;
		rx_status->rate_idx = rtw89_get_data_mcs(rtwdev, data_rate);
		rx_status->nss = rtw89_get_data_nss(rtwdev, data_rate) + 1;
	} else {
		rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
	}

	/* he_gi is used to match ppdu, so we always fill it. */
	rx_status->he_gi = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, true);
	rx_status->flag |= RX_FLAG_MACTIME_START;
	rx_status->mactime = desc_info->free_run_cnt;

	rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status);
}
/* Pick the deepest power-save mode both chip and firmware allow.
 * Priority: power gating > clock gating > RF off; NONE when disallowed.
 */
static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	/* disabled by module parameter, chip capability or firmware feature */
	if (rtw89_disable_ps_mode || !chip->ps_mode_supported ||
	    RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
		return RTW89_PS_MODE_NONE;

	/* PG additionally requires firmware LPS-PG support */
	if ((chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED)) &&
	    !RTW89_CHK_FW_FEATURE(NO_LPS_PG, &rtwdev->fw))
		return RTW89_PS_MODE_PWR_GATED;

	if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED))
		return RTW89_PS_MODE_CLK_GATED;

	if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_RFOFF))
		return RTW89_PS_MODE_RFOFF;

	return RTW89_PS_MODE_NONE;
}
/* Hand every frame still parked on this band's PPDU-status queue up to
 * mac80211 as-is (their PPDU status will not be matched anymore).
 */
static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
					   struct rtw89_rx_desc_info *desc_info)
{
	struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts;
	u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
	struct ieee80211_rx_status *rx_status;
	struct sk_buff *skb_ppdu, *tmp;

	skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
		skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
		/* the rx_status was filled when the skb was queued */
		rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
		rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status);
	}
}
/* Top-level RX entry from the HCI layer. Non-WiFi packets are routed to
 * the report parser. WiFi frames whose type is in PPDU_FILTER_BITMAP are
 * parked in a per-band queue until their PPDU status arrives; everything
 * else goes straight to mac80211.
 */
void rtw89_core_rx(struct rtw89_dev *rtwdev,
		   struct rtw89_rx_desc_info *desc_info,
		   struct sk_buff *skb)
{
	struct ieee80211_rx_status *rx_status;
	struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts;
	u8 ppdu_cnt = desc_info->ppdu_cnt;
	u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;

	if (desc_info->pkt_type != RTW89_CORE_RX_TYPE_WIFI) {
		rtw89_core_rx_process_report(rtwdev, desc_info, skb);
		return;
	}

	/* new PPDU started: flush frames still waiting on the previous one */
	if (ppdu_sts->curr_rx_ppdu_cnt[band] != ppdu_cnt) {
		rtw89_core_flush_ppdu_rx_queue(rtwdev, desc_info);
		ppdu_sts->curr_rx_ppdu_cnt[band] = ppdu_cnt;
	}

	rx_status = IEEE80211_SKB_RXCB(skb);
	memset(rx_status, 0, sizeof(*rx_status));
	rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
	if (desc_info->long_rxdesc &&
	    BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
		skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
	else
		rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status);
}
EXPORT_SYMBOL(rtw89_core_rx);
  1961. void rtw89_core_napi_start(struct rtw89_dev *rtwdev)
  1962. {
  1963. if (test_and_set_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
  1964. return;
  1965. napi_enable(&rtwdev->napi);
  1966. }
  1967. EXPORT_SYMBOL(rtw89_core_napi_start);
/* Counterpart of rtw89_core_napi_start(); idempotent via the flag bit. */
void rtw89_core_napi_stop(struct rtw89_dev *rtwdev)
{
	if (!test_and_clear_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
		return;

	/* wait out any in-flight poll before disabling */
	napi_synchronize(&rtwdev->napi);
	napi_disable(&rtwdev->napi);
}
EXPORT_SYMBOL(rtw89_core_napi_stop);
/* Register the HCI poll routine on a dummy netdev. netif_napi_add()
 * dropped its weight argument in kernel 6.1, hence the version split.
 */
void rtw89_core_napi_init(struct rtw89_dev *rtwdev)
{
	init_dummy_netdev(&rtwdev->netdev);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
	netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
		       rtwdev->hci.ops->napi_poll);
#else
	netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
		       rtwdev->hci.ops->napi_poll, NAPI_POLL_WEIGHT);
#endif
}
EXPORT_SYMBOL(rtw89_core_napi_init);
/* Stop NAPI (if running) and unregister it from the dummy netdev. */
void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev)
{
	rtw89_core_napi_stop(rtwdev);
	netif_napi_del(&rtwdev->napi);
}
EXPORT_SYMBOL(rtw89_core_napi_deinit);
/* Deferred work: try to start a TX BA (A-MPDU) session for every txq
 * queued on ba_list. Entries are removed whether or not setup succeeds;
 * an -EINVAL result marks the txq so no further attempts are made.
 */
static void rtw89_core_ba_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, ba_work);
	struct rtw89_txq *rtwtxq, *tmp;
	int ret;

	spin_lock_bh(&rtwdev->ba_lock);
	list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) {
		struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
		struct ieee80211_sta *sta = txq->sta;
		struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
		u8 tid = txq->tid;

		if (!sta) {
			rtw89_warn(rtwdev, "cannot start BA without sta\n");
			goto skip_ba_work;
		}

		if (rtwsta->disassoc) {
			rtw89_debug(rtwdev, RTW89_DBG_TXRX,
				    "cannot start BA with disassoc sta\n");
			goto skip_ba_work;
		}

		ret = ieee80211_start_tx_ba_session(sta, tid, 0);
		if (ret) {
			rtw89_debug(rtwdev, RTW89_DBG_TXRX,
				    "failed to setup BA session for %pM:%2d: %d\n",
				    sta->addr, tid, ret);
			/* mac80211 rejected the tid outright: stop retrying */
			if (ret == -EINVAL)
				set_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags);
		}
skip_ba_work:
		list_del_init(&rtwtxq->list);
	}
	spin_unlock_bh(&rtwdev->ba_lock);
}
  2028. static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
  2029. struct ieee80211_sta *sta)
  2030. {
  2031. struct rtw89_txq *rtwtxq, *tmp;
  2032. spin_lock_bh(&rtwdev->ba_lock);
  2033. list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) {
  2034. struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
  2035. if (sta == txq->sta)
  2036. list_del_init(&rtwtxq->list);
  2037. }
  2038. spin_unlock_bh(&rtwdev->ba_lock);
  2039. }
  2040. static void rtw89_core_free_sta_pending_forbid_ba(struct rtw89_dev *rtwdev,
  2041. struct ieee80211_sta *sta)
  2042. {
  2043. struct rtw89_txq *rtwtxq, *tmp;
  2044. spin_lock_bh(&rtwdev->ba_lock);
  2045. list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
  2046. struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
  2047. if (sta == txq->sta) {
  2048. clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
  2049. list_del_init(&rtwtxq->list);
  2050. }
  2051. }
  2052. spin_unlock_bh(&rtwdev->ba_lock);
  2053. }
  2054. static void rtw89_core_free_sta_pending_roc_tx(struct rtw89_dev *rtwdev,
  2055. struct ieee80211_sta *sta)
  2056. {
  2057. struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
  2058. struct sk_buff *skb, *tmp;
  2059. skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) {
  2060. skb_unlink(skb, &rtwsta->roc_queue);
  2061. dev_kfree_skb_any(skb);
  2062. }
  2063. }
/* Tear down an active TX BA session and temporarily forbid establishing
 * a new one; rtw89_forbid_ba_work() lifts the ban after
 * RTW89_FORBID_BA_TIMER.
 */
static void rtw89_core_stop_tx_ba_session(struct rtw89_dev *rtwdev,
					  struct rtw89_txq *rtwtxq)
{
	struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
	struct ieee80211_sta *sta = txq->sta;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);

	if (unlikely(!rtwsta) || unlikely(rtwsta->disassoc))
		return;

	/* nothing to stop without a session; already banned otherwise */
	if (!test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags) ||
	    test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
		return;

	spin_lock_bh(&rtwdev->ba_lock);
	if (!test_and_set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
		list_add_tail(&rtwtxq->list, &rtwdev->forbid_ba_list);
	spin_unlock_bh(&rtwdev->ba_lock);

	ieee80211_stop_tx_ba_session(sta, txq->tid);
	/* restart the un-ban timer from now */
	cancel_delayed_work(&rtwdev->forbid_ba_work);
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->forbid_ba_work,
				     RTW89_FORBID_BA_TIMER);
}
/* Decide how this frame interacts with A-MPDU aggregation: EAPOL frames
 * tear the BA session down; frames on an established session get the
 * AMPDU ctl flag; otherwise queue the txq for BA-session setup work.
 */
static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
				     struct rtw89_txq *rtwtxq,
				     struct sk_buff *skb)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
	struct ieee80211_sta *sta = txq->sta;
	struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;

	if (test_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags))
		return;

	/* don't aggregate EAPOL frames; stop a running session instead */
	if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
		rtw89_core_stop_tx_ba_session(rtwdev, rtwtxq);
		return;
	}

	if (unlikely(!sta))
		return;

	if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags)))
		return;

	if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags)) {
		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_AMPDU;
		return;
	}

	/* no session yet: hand the txq to rtw89_core_ba_work() once */
	spin_lock_bh(&rtwdev->ba_lock);
	if (!rtwsta->disassoc && list_empty(&rtwtxq->list)) {
		list_add_tail(&rtwtxq->list, &rtwdev->ba_list);
		ieee80211_queue_work(hw, &rtwdev->ba_work);
	}
	spin_unlock_bh(&rtwdev->ba_lock);
}
/* Dequeue up to @frame_cnt frames from the txq and write them to the
 * HCI TX path. Stops early if the queue runs dry or a write fails.
 * @byte_cnt is currently unused here (depth info from the caller).
 */
static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
				struct rtw89_txq *rtwtxq,
				unsigned long frame_cnt,
				unsigned long byte_cnt)
{
	struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
	struct ieee80211_vif *vif = txq->vif;
	struct ieee80211_sta *sta = txq->sta;
	struct sk_buff *skb;
	unsigned long i;
	int ret;

	rcu_read_lock();
	for (i = 0; i < frame_cnt; i++) {
		skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
		if (!skb) {
			rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
			goto out;
		}
		rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
		ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
		if (ret) {
			rtw89_err(rtwdev, "failed to push txq: %d\n", ret);
			ieee80211_free_txskb(rtwdev->hw, skb);
			break;
		}
	}
out:
	rcu_read_unlock();
}
  2142. static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
  2143. {
  2144. u8 qsel, ch_dma;
  2145. qsel = rtw89_core_get_qsel(rtwdev, tid);
  2146. ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
  2147. return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma);
  2148. }
/* TX aggregation pacing: under heavy TX, briefly hold back a txq with a
 * single pending frame (at most max_agg_wait rounds) so more frames can
 * accumulate into a larger burst. Returns true when the txq should be
 * skipped in this scheduling round.
 */
static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev,
				    struct ieee80211_txq *txq,
				    unsigned long *frame_cnt,
				    bool *sched_txq, bool *reinvoke)
{
	struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
	struct ieee80211_sta *sta = txq->sta;
	struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;

	if (!sta || rtwsta->max_agg_wait <= 0)
		return false;

	/* pacing only pays off when TX traffic is above MID level */
	if (rtwdev->stats.tx_tfc_lv <= RTW89_TFC_MID)
		return false;

	/* more than one frame pending: push all but the last one now */
	if (*frame_cnt > 1) {
		*frame_cnt -= 1;
		*sched_txq = true;
		*reinvoke = true;
		rtwtxq->wait_cnt = 1;
		return false;
	}

	/* exactly one frame: keep waiting while the wait budget lasts */
	if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta->max_agg_wait) {
		*reinvoke = true;
		rtwtxq->wait_cnt++;
		return true;
	}

	rtwtxq->wait_cnt = 0;
	return false;
}
/* Drain mac80211's txq scheduler for one access category, bounded by the
 * available HCI TX resources. Sets *reinvoke when scheduling should run
 * again soon (aggregation pacing or resource exhaustion).
 */
static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinvoke)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_txq *txq;
	struct rtw89_vif *rtwvif;
	struct rtw89_txq *rtwtxq;
	unsigned long frame_cnt;
	unsigned long byte_cnt;
	u32 tx_resource;
	bool sched_txq;

	ieee80211_txq_schedule_start(hw, ac);
	while ((txq = ieee80211_next_txq(hw, ac))) {
		rtwtxq = (struct rtw89_txq *)txq->drv_priv;
		rtwvif = (struct rtw89_vif *)txq->vif->drv_priv;

		/* don't transmit for a vif that is currently off-channel */
		if (rtwvif->offchan) {
			ieee80211_return_txq(hw, txq, true);
			continue;
		}

		tx_resource = rtw89_check_and_reclaim_tx_resource(rtwdev, txq->tid);
		sched_txq = false;

		ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
		if (rtw89_core_txq_agg_wait(rtwdev, txq, &frame_cnt, &sched_txq, reinvoke)) {
			ieee80211_return_txq(hw, txq, true);
			continue;
		}
		frame_cnt = min_t(unsigned long, frame_cnt, tx_resource);
		rtw89_core_txq_push(rtwdev, rtwtxq, frame_cnt, byte_cnt);
		ieee80211_return_txq(hw, txq, sched_txq);
		if (frame_cnt != 0)
			rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid));

		/* bound of tx_resource could get stuck due to burst traffic */
		if (frame_cnt == tx_resource)
			*reinvoke = true;
	}
	ieee80211_txq_schedule_end(hw, ac);
}
/* Deferred entry into inactive power save (IPS); takes rtwdev->mutex. */
static void rtw89_ips_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						ips_work);

	mutex_lock(&rtwdev->mutex);
	rtw89_enter_ips_by_hwflags(rtwdev);
	mutex_unlock(&rtwdev->mutex);
}
  2220. static void rtw89_core_txq_work(struct work_struct *w)
  2221. {
  2222. struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work);
  2223. bool reinvoke = false;
  2224. u8 ac;
  2225. for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
  2226. rtw89_core_txq_schedule(rtwdev, ac, &reinvoke);
  2227. if (reinvoke) {
  2228. /* reinvoke to process the last frame */
  2229. mod_delayed_work(rtwdev->txq_wq, &rtwdev->txq_reinvoke_work, 1);
  2230. }
  2231. }
/* Delayed trampoline that re-queues the main TX scheduling work. */
static void rtw89_core_txq_reinvoke_work(struct work_struct *w)
{
	struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
						txq_reinvoke_work.work);

	queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
}
  2238. static void rtw89_forbid_ba_work(struct work_struct *w)
  2239. {
  2240. struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
  2241. forbid_ba_work.work);
  2242. struct rtw89_txq *rtwtxq, *tmp;
  2243. spin_lock_bh(&rtwdev->ba_lock);
  2244. list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->forbid_ba_list, list) {
  2245. clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
  2246. list_del_init(&rtwtxq->list);
  2247. }
  2248. spin_unlock_bh(&rtwdev->ba_lock);
  2249. }
/* Station iterator: flush frames parked on the ROC queue back into the
 * TX path for stations on the target vif's sub-entity.
 */
static void rtw89_core_sta_pending_tx_iter(void *data,
					   struct ieee80211_sta *sta)
{
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_vif *rtwvif_target = data, *rtwvif = rtwsta->rtwvif;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct sk_buff *skb, *tmp;
	int qsel, ret;

	if (rtwvif->sub_entity_idx != rtwvif_target->sub_entity_idx)
		return;

	if (skb_queue_len(&rtwsta->roc_queue) == 0)
		return;

	skb_queue_walk_safe(&rtwsta->roc_queue, skb, tmp) {
		skb_unlink(skb, &rtwsta->roc_queue);

		/* qsel is filled by rtw89_core_tx_write() on success */
		ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
		if (ret) {
			rtw89_warn(rtwdev, "pending tx failed with %d\n", ret);
			dev_kfree_skb_any(skb);
		} else {
			rtw89_core_tx_kick_off(rtwdev, qsel);
		}
	}
}
/* Flush ROC-deferred TX for all stations on @rtwvif's sub-entity. */
static void rtw89_core_handle_sta_pending_tx(struct rtw89_dev *rtwdev,
					     struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  rtw89_core_sta_pending_tx_iter,
					  rtwvif);
}
/* Send a (QoS) Null frame to the AP, optionally with the PM bit set, and
 * wait for TX completion. Only meaningful in associated station mode;
 * returns 0 when not applicable, negative errno on failure.
 */
static int rtw89_core_send_nullfunc(struct rtw89_dev *rtwdev,
				    struct rtw89_vif *rtwvif, bool qos, bool ps)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct ieee80211_sta *sta;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	int ret, qsel;

	/* assoc flag moved from bss_conf to vif->cfg in kernel 6.0 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0)
	if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
#else
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
#endif
		return 0;

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
	if (!sta) {
		ret = -EINVAL;
		goto out;
	}

	/* ieee80211_nullfunc_get() gained a link_id argument in 6.1 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
	skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, qos);
#else
	skb = ieee80211_nullfunc_get(rtwdev->hw, vif, qos);
#endif
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (ps)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);

	ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
	if (ret) {
		rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
		dev_kfree_skb_any(skb);
		goto out;
	}

	/* drop the RCU lock before the (sleeping) wait for completion */
	rcu_read_unlock();

	return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
					       RTW89_ROC_TX_TIMEOUT);
out:
	rcu_read_unlock();
	return ret;
}
/* Begin a remain-on-channel period: leave power save, pause the channel
 * context, null-notify the AP with PM=1, mark co-located vifs offchan,
 * switch to the ROC channel and relax the RX filter. Needs rtwdev->mutex.
 */
void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw89_roc *roc = &rtwvif->roc;
	struct cfg80211_chan_def roc_chan;
	struct rtw89_vif *tmp;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	/* schedule the end of the ROC period up front */
	ieee80211_queue_delayed_work(hw, &rtwvif->roc.roc_work,
				     msecs_to_jiffies(rtwvif->roc.duration));

	rtw89_leave_ips_by_hwflags(rtwdev);
	rtw89_leave_lps(rtwdev);
	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_ROC);

	/* null frame with PM=1 so the AP buffers while we are away */
	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, true);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "roc send null-1 failed: %d\n", ret);

	rtw89_for_each_rtwvif(rtwdev, tmp)
		if (tmp->sub_entity_idx == rtwvif->sub_entity_idx)
			tmp->offchan = true;

	cfg80211_chandef_create(&roc_chan, &roc->chan, NL80211_CHAN_NO_HT);
	rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, &roc_chan);
	rtw89_set_channel(rtwdev);

	/* relax CAM-match RX filtering; restored in rtw89_roc_end() */
	rtw89_write32_clr(rtwdev,
			  rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
			  B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH);

	ieee80211_ready_on_channel(hw);
}
/* End a remain-on-channel period and restore normal operation: RX filter,
 * channel context, offchan flags and the frames deferred during ROC.
 * Needs rtwdev->mutex.
 */
void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw89_roc *roc = &rtwvif->roc;
	struct rtw89_vif *tmp;
	int ret;

	lockdep_assert_held(&rtwdev->mutex);

	ieee80211_remain_on_channel_expired(hw);

	rtw89_leave_ips_by_hwflags(rtwdev);
	rtw89_leave_lps(rtwdev);

	/* restore the RX filter relaxed in rtw89_roc_start() */
	rtw89_write32_mask(rtwdev,
			   rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0),
			   B_AX_RX_FLTR_CFG_MASK,
			   rtwdev->hal.rx_fltr);

	roc->state = RTW89_ROC_IDLE;
	rtw89_config_roc_chandef(rtwdev, rtwvif->sub_entity_idx, NULL);
	rtw89_chanctx_proceed(rtwdev);
	/* null frame with PM cleared: we are back on channel */
	ret = rtw89_core_send_nullfunc(rtwdev, rtwvif, true, false);
	if (ret)
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "roc send null-0 failed: %d\n", ret);

	rtw89_for_each_rtwvif(rtwdev, tmp)
		if (tmp->sub_entity_idx == rtwvif->sub_entity_idx)
			tmp->offchan = false;

	rtw89_core_handle_sta_pending_tx(rtwdev, rtwvif);
	queue_work(rtwdev->txq_wq, &rtwdev->txq_work);

	/* if mac80211 considers us idle, fall back into IPS after a while */
	if (hw->conf.flags & IEEE80211_CONF_IDLE)
		ieee80211_queue_delayed_work(hw, &roc->roc_work,
					     RTW89_ROC_IDLE_TIMEOUT);
}
/* Delayed worker driving the ROC state machine: ends an active ROC
 * period, or lets the device drop into IPS once the state is idle.
 */
void rtw89_roc_work(struct work_struct *work)
{
	struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
						roc.roc_work.work);
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_roc *roc = &rtwvif->roc;

	mutex_lock(&rtwdev->mutex);

	switch (roc->state) {
	case RTW89_ROC_IDLE:
		rtw89_enter_ips_by_hwflags(rtwdev);
		break;
	case RTW89_ROC_MGMT:
	case RTW89_ROC_NORMAL:
		rtw89_roc_end(rtwdev, rtwvif);
		break;
	default:
		break;
	}

	mutex_unlock(&rtwdev->mutex);
}
  2406. static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
  2407. u32 throughput, u64 cnt)
  2408. {
  2409. if (cnt < 100)
  2410. return RTW89_TFC_IDLE;
  2411. if (throughput > 50)
  2412. return RTW89_TFC_HIGH;
  2413. if (throughput > 10)
  2414. return RTW89_TFC_MID;
  2415. if (throughput > 2)
  2416. return RTW89_TFC_LOW;
  2417. return RTW89_TFC_ULTRA_LOW;
  2418. }
/* Close one measurement window: convert accumulated unicast byte counts
 * to throughput, smooth via EWMA, derive traffic levels and reset the
 * accumulators. Returns true when either direction's level changed.
 */
static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
				     struct rtw89_traffic_stats *stats)
{
	/* remember previous levels to detect a change at the end */
	enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
	enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;

	stats->tx_throughput_raw = (u32)(stats->tx_unicast >> RTW89_TP_SHIFT);
	stats->rx_throughput_raw = (u32)(stats->rx_unicast >> RTW89_TP_SHIFT);

	ewma_tp_add(&stats->tx_ewma_tp, stats->tx_throughput_raw);
	ewma_tp_add(&stats->rx_ewma_tp, stats->rx_throughput_raw);

	stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
	stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
	stats->tx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->tx_throughput,
						   stats->tx_cnt);
	stats->rx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->rx_throughput,
						   stats->rx_cnt);
	stats->tx_avg_len = stats->tx_cnt ?
			    DIV_ROUND_DOWN_ULL(stats->tx_unicast, stats->tx_cnt) : 0;
	stats->rx_avg_len = stats->rx_cnt ?
			    DIV_ROUND_DOWN_ULL(stats->rx_unicast, stats->rx_cnt) : 0;

	/* start a fresh accounting window */
	stats->tx_unicast = 0;
	stats->rx_unicast = 0;
	stats->tx_cnt = 0;
	stats->rx_cnt = 0;
	stats->rx_tf_periodic = stats->rx_tf_acc;
	stats->rx_tf_acc = 0;

	if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv)
		return true;

	return false;
}
  2448. static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
  2449. {
  2450. struct rtw89_vif *rtwvif;
  2451. bool tfc_changed;
  2452. tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats);
  2453. rtw89_for_each_rtwvif(rtwdev, rtwvif) {
  2454. rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats);
  2455. rtw89_fw_h2c_tp_offload(rtwdev, rtwvif);
  2456. }
  2457. return tfc_changed;
  2458. }
  2459. static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
  2460. {
  2461. if ((rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
  2462. rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT) ||
  2463. rtwvif->tdls_peer)
  2464. return;
  2465. if (rtwvif->offchan)
  2466. return;
  2467. if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
  2468. rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE)
  2469. rtw89_enter_lps(rtwdev, rtwvif, true);
  2470. }
/* Attempt LPS entry for every vif on the device. */
static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_vif_enter_lps(rtwdev, rtwvif);
}
  2477. static void rtw89_core_rfk_track(struct rtw89_dev *rtwdev)
  2478. {
  2479. enum rtw89_entity_mode mode;
  2480. mode = rtw89_get_entity_mode(rtwdev);
  2481. if (mode == RTW89_ENTITY_MODE_MCC)
  2482. return;
  2483. rtw89_chip_rfk_track(rtwdev);
  2484. }
  2485. void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
  2486. {
  2487. enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
  2488. if (mode == RTW89_ENTITY_MODE_MCC)
  2489. rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_P2P_PS_CHANGE);
  2490. else
  2491. rtw89_process_p2p_ps(rtwdev, vif);
  2492. }
  2493. void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
  2494. struct rtw89_traffic_stats *stats)
  2495. {
  2496. stats->tx_unicast = 0;
  2497. stats->rx_unicast = 0;
  2498. stats->tx_cnt = 0;
  2499. stats->rx_cnt = 0;
  2500. ewma_tp_init(&stats->tx_ewma_tp);
  2501. ewma_tp_init(&stats->rx_ewma_tp);
  2502. }
/* Periodic housekeeping (every RTW89_TRACK_WORK_PERIOD): traffic
 * accounting, PHY/BTC/RFK tracking and LPS entry. Re-arms itself while
 * the device is running; skips most work during scans.
 */
static void rtw89_track_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						track_work.work);
	bool tfc_changed;

	if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags))
		return;

	mutex_lock(&rtwdev->mutex);

	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		goto out;

	/* re-arm before doing the actual work */
	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
				     RTW89_TRACK_WORK_PERIOD);

	tfc_changed = rtw89_traffic_stats_track(rtwdev);
	if (rtwdev->scanning)
		goto out;

	rtw89_leave_lps(rtwdev);

	if (tfc_changed) {
		rtw89_hci_recalc_int_mit(rtwdev);
		rtw89_btc_ntfy_wl_sta(rtwdev);
	}
	rtw89_mac_bf_monitor_track(rtwdev);
	rtw89_phy_stat_track(rtwdev);
	rtw89_phy_env_monitor_track(rtwdev);
	rtw89_phy_dig(rtwdev);
	rtw89_core_rfk_track(rtwdev);
	rtw89_phy_ra_update(rtwdev);
	rtw89_phy_cfo_track(rtwdev);
	rtw89_phy_tx_path_div_track(rtwdev);
	rtw89_phy_antdiv_track(rtwdev);
	rtw89_phy_ul_tb_ctrl_track(rtwdev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
	rtw89_tas_track(rtwdev);
#endif
	rtw89_chanctx_track(rtwdev);

	if (rtwdev->lps_enabled && !rtwdev->btc.lps)
		rtw89_enter_lps_track(rtwdev);

out:
	mutex_unlock(&rtwdev->mutex);
}
/* Find and claim the first clear bit in @addr. Returns @size (an
 * out-of-range index) when the map is full; callers must check for it.
 */
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size)
{
	unsigned long bit;

	bit = find_first_zero_bit(addr, size);
	if (bit < size)
		set_bit(bit, addr);

	return bit;
}
/* Return a previously acquired bit to the map. */
void rtw89_core_release_bit_map(unsigned long *addr, u8 bit)
{
	clear_bit(bit, addr);
}
/* Reset the whole bitmap to the unallocated state. */
void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
{
	bitmap_zero(addr, nbits);
}
/* Claim a BA CAM entry for @tid on @rtwsta. When the CAM is full, tid
 * 0/5 may evict an entry held by another tid. Returns 0 with the CAM
 * index in @cam_idx, or -ENOSPC.
 */
int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev,
				    struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
	struct rtw89_ba_cam_entry *entry = NULL, *tmp;
	u8 idx;
	int i;

	lockdep_assert_held(&rtwdev->mutex);

	idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num);
	if (idx == chip->bacam_num) {
		/* allocate a static BA CAM to tid=0/5, so replace the existing
		 * one if BA CAM is full. Hardware will process the original tid
		 * automatically.
		 */
		if (tid != 0 && tid != 5)
			return -ENOSPC;

		for_each_set_bit(i, cam_info->ba_cam_map, chip->bacam_num) {
			tmp = &cam_info->ba_cam_entry[i];
			if (tmp->tid == 0 || tmp->tid == 5)
				continue;

			/* evict this non-0/5 entry and reuse its slot */
			idx = i;
			entry = tmp;
			list_del(&entry->list);
			break;
		}

		if (!entry)
			return -ENOSPC;
	} else {
		entry = &cam_info->ba_cam_entry[idx];
	}

	entry->tid = tid;
	list_add_tail(&entry->list, &rtwsta->ba_cam_list);

	*cam_idx = idx;

	return 0;
}
/* Release the BA CAM entry held by @rtwsta for @tid; returns its index
 * via @cam_idx, or -ENOENT when the sta holds none for that tid.
 */
int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev,
				    struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
{
	struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
	struct rtw89_ba_cam_entry *entry = NULL, *tmp;
	u8 idx;

	lockdep_assert_held(&rtwdev->mutex);

	list_for_each_entry_safe(entry, tmp, &rtwsta->ba_cam_list, list) {
		if (entry->tid != tid)
			continue;

		/* index is the entry's offset within the CAM array */
		idx = entry - cam_info->ba_cam_entry;
		list_del(&entry->list);

		rtw89_core_release_bit_map(cam_info->ba_cam_map, idx);
		*cam_idx = idx;
		return 0;
	}

	return -ENOENT;
}
/* Map an NL80211 interface type case onto the same-named wifi_role. */
#define RTW89_TYPE_MAPPING(_type)	\
	case NL80211_IFTYPE_ ## _type:	\
		rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type;	\
		break

/* Derive wifi_role, net_type and self_role for the vif from its NL80211
 * interface type (and association state for stations).
 */
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;

	/* wifi_role: distinguish P2P flavors of STATION/AP */
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (vif->p2p)
			rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_CLIENT;
		else
			rtwvif->wifi_role = RTW89_WIFI_ROLE_STATION;
		break;
	case NL80211_IFTYPE_AP:
		if (vif->p2p)
			rtwvif->wifi_role = RTW89_WIFI_ROLE_P2P_GO;
		else
			rtwvif->wifi_role = RTW89_WIFI_ROLE_AP;
		break;
	RTW89_TYPE_MAPPING(ADHOC);
	RTW89_TYPE_MAPPING(MONITOR);
	RTW89_TYPE_MAPPING(MESH_POINT);
	default:
		WARN_ON(1);
		break;
	}

	/* net_type / self_role */
	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		rtwvif->net_type = RTW89_NET_TYPE_AP_MODE;
		rtwvif->self_role = RTW89_SELF_ROLE_AP;
		break;
	case NL80211_IFTYPE_ADHOC:
		rtwvif->net_type = RTW89_NET_TYPE_AD_HOC;
		rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
		break;
	case NL80211_IFTYPE_STATION:
		if (assoc) {
			rtwvif->net_type = RTW89_NET_TYPE_INFRA;
			/* trigger-frame handling follows the AP's HE support */
			rtwvif->trigger = vif->bss_conf.he_support;
		} else {
			rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
			rtwvif->trigger = false;
		}
		rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
		rtwvif->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
		break;
	case NL80211_IFTYPE_MONITOR:
		break;
	default:
		WARN_ON(1);
		break;
	}
}
/* Driver-side sta add: initialize per-sta state (BA CAM list, ROC queue,
 * txqs, RSSI/SNR/EVM averages) and assign a mac_id. STA-mode peers reuse
 * the vif's mac_id; AP-mode and TDLS peers allocate one from the global
 * map and register the role with firmware. Returns 0 or negative errno.
 */
int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
		       struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_hal *hal = &rtwdev->hal;
	/* with antenna diversity the chip exposes two RSSI/EVM paths */
	u8 ant_num = hal->ant_diversity ? 2 : rtwdev->chip->rf_path_num;
	int i;
	int ret;

	rtwsta->rtwdev = rtwdev;
	rtwsta->rtwvif = rtwvif;
	rtwsta->prev_rssi = 0;
	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
	skb_queue_head_init(&rtwsta->roc_queue);

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
		rtw89_core_txq_init(rtwdev, sta->txq[i]);

	ewma_rssi_init(&rtwsta->avg_rssi);
	ewma_snr_init(&rtwsta->avg_snr);
	for (i = 0; i < ant_num; i++) {
		ewma_rssi_init(&rtwsta->rssi[i]);
		ewma_evm_init(&rtwsta->evm_min[i]);
		ewma_evm_init(&rtwsta->evm_max[i]);
	}

	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
		/* for station mode, assign the mac_id from itself */
		rtwsta->mac_id = rtwvif->mac_id;

		/* must do rtw89_reg_6ghz_power_recalc() before rfk channel */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
		rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, true);
#endif
		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
					 BTC_ROLE_MSTS_STA_CONN_START);
		rtw89_chip_rfk_channel(rtwdev);
	} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
		rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
							    RTW89_MAX_MAC_ID_NUM);
		if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
			return -ENOSPC;

		ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
		if (ret) {
			/* undo the mac_id allocation on any h2c failure */
			rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
			rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
			return ret;
		}

		ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
						 RTW89_ROLE_CREATE);
		if (ret) {
			rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
			rtw89_warn(rtwdev, "failed to send h2c role info\n");
			return ret;
		}

		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
	}

	return 0;
}
  2723. int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
  2724. struct ieee80211_vif *vif,
  2725. struct ieee80211_sta *sta)
  2726. {
  2727. struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
  2728. struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
  2729. if (vif->type == NL80211_IFTYPE_STATION)
  2730. rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, false);
  2731. rtwdev->total_sta_assoc--;
  2732. if (sta->tdls)
  2733. rtwvif->tdls_peer--;
  2734. rtwsta->disassoc = true;
  2735. return 0;
  2736. }
/* Tear down per-station driver/firmware state on disconnect: flush
 * pending BA work, release CAM entries, and refresh the firmware
 * CMAC/join/CAM tables to reflect the disconnected state.
 *
 * Returns 0 on success or a negative error code from the H2C commands.
 */
int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	int ret;

	rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
	rtw89_mac_bf_disassoc(rtwdev, vif, sta);
	/* Drop any queued BA/forbid-BA/ROC work referencing this peer. */
	rtw89_core_free_sta_pending_ba(rtwdev, sta);
	rtw89_core_free_sta_pending_forbid_ba(rtwdev, sta);
	rtw89_core_free_sta_pending_roc_tx(rtwdev, sta);

	/* These CAM entries were created in rtw89_core_sta_assoc(). */
	if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
		/* Back to "no link" role mapping for this vif. */
		rtw89_vif_type_mapping(vif, false);
		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
	}

	ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
	if (ret) {
		rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
		return ret;
	}

	/* dis_conn == true: tell firmware the peer has left. */
	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, true);
	if (ret) {
		rtw89_warn(rtwdev, "failed to send h2c join info\n");
		return ret;
	}

	/* update cam aid mac_id net_type */
	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
	if (ret) {
		rtw89_warn(rtwdev, "failed to send h2c cam\n");
		return ret;
	}

	return ret;
}
/* Complete association of a station: program the address/BSSID CAM,
 * push firmware CMAC/join/CAM tables, then start rate control,
 * beamforming and coex bookkeeping for the now-associated peer.
 *
 * Returns 0 on success or a negative error code from CAM init / H2C
 * commands.
 */
int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	struct rtw89_bssid_cam_entry *bssid_cam = rtw89_get_bssid_cam_of(rtwvif, rtwsta);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	int ret;

	if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
		if (sta->tdls) {
			/* TDLS peers get a dedicated BSSID CAM entry keyed
			 * by the peer address.
			 */
			ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, sta->addr);
			if (ret) {
				rtw89_warn(rtwdev, "failed to send h2c init bssid cam for TDLS\n");
				return ret;
			}
		}

		ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, bssid_cam);
		if (ret) {
			rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
			return ret;
		}
	}

	ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
	if (ret) {
		rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
		return ret;
	}

	/* dis_conn == false: firmware treats the peer as joined. */
	ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, false);
	if (ret) {
		rtw89_warn(rtwdev, "failed to send h2c join info\n");
		return ret;
	}

	/* update cam aid mac_id net_type */
	ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
	if (ret) {
		rtw89_warn(rtwdev, "failed to send h2c cam\n");
		return ret;
	}

	rtwdev->total_sta_assoc++;
	if (sta->tdls)
		rtwvif->tdls_peer++;
	rtw89_phy_ra_assoc(rtwdev, sta);
	rtw89_mac_bf_assoc(rtwdev, vif, sta);
	rtw89_mac_bf_monitor_calc(rtwdev, sta, false);

	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
		struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

		/* ER SU is usable only when the AP did not disable it in
		 * the HE operation element.
		 */
		if (bss_conf->he_support &&
		    !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
			rtwsta->er_cap = true;

		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
					 BTC_ROLE_MSTS_STA_CONN_END);
		rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template, chan);
		rtw89_phy_ul_tb_assoc(rtwdev, rtwvif);

		ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
		if (ret) {
			rtw89_warn(rtwdev, "failed to send h2c general packet\n");
			return ret;
		}
	}

	return ret;
}
/* Undo rtw89_core_sta_add() when a station is removed: notify coex,
 * release the MAC ID slot (AP clients / TDLS peers) and tell firmware to
 * drop the role.
 *
 * Returns 0 on success or a negative H2C error code.
 */
int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
			  struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
	int ret;

	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0)
		/* Counterpart of the recalc(..., true) done in sta_add(). */
		rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, false);
#endif
		rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
					 BTC_ROLE_MSTS_STA_DIS_CONN);
	} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
		rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);

		ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
						 RTW89_ROLE_REMOVE);
		if (ret) {
			rtw89_warn(rtwdev, "failed to send h2c role info\n");
			return ret;
		}

		rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_REMOTE_STA_CHANGE);
	}

	return 0;
}
/* Apply one cfg80211 per-TID configuration entry. @tid_conf->tids is a
 * bitmap (bit n == TID n); for each selected TID, toggle the BA-forbid
 * flag and/or adjust the station's A-MSDU limit on the matching TX queue.
 */
static void _rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
				       struct ieee80211_sta *sta,
				       struct cfg80211_tid_cfg *tid_conf)
{
	struct ieee80211_txq *txq;
	struct rtw89_txq *rtwtxq;
	u32 mask = tid_conf->mask;
	u8 tids = tid_conf->tids;
	int tids_nbit = BITS_PER_BYTE;
	int i;

	/* Walk the TID bitmap LSB-first; stop early once it is empty. */
	for (i = 0; i < tids_nbit; i++, tids >>= 1) {
		if (!tids)
			break;

		if (!(tids & BIT(0)))
			continue;

		txq = sta->txq[i];
		rtwtxq = (struct rtw89_txq *)txq->drv_priv;

		if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
			if (tid_conf->ampdu == NL80211_TID_CONFIG_ENABLE) {
				clear_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
			} else {
				/* Tear down an active BA session before
				 * forbidding new ones on this queue.
				 */
				if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags))
					ieee80211_stop_tx_ba_session(sta, txq->tid);
				spin_lock_bh(&rtwdev->ba_lock);
				list_del_init(&rtwtxq->list);
				set_bit(RTW89_TXQ_F_FORBID_BA, &rtwtxq->flags);
				spin_unlock_bh(&rtwdev->ba_lock);
			}
		}

		/* tids == 0xff (only possible on the first iteration) means
		 * the entry addressed all TIDs; A-MSDU control is applied
		 * only in that case since max_amsdu_subframes is per-STA.
		 */
		if (mask & BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL) && tids == 0xff) {
			if (tid_conf->amsdu == NL80211_TID_CONFIG_ENABLE)
				sta->max_amsdu_subframes = 0;
			else
				sta->max_amsdu_subframes = 1;
		}
	}
}
  2900. void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
  2901. struct ieee80211_sta *sta,
  2902. struct cfg80211_tid_config *tid_config)
  2903. {
  2904. int i;
  2905. for (i = 0; i < tid_config->n_tid_conf; i++)
  2906. _rtw89_core_set_tid_config(rtwdev, sta,
  2907. &tid_config->tid_conf[i]);
  2908. }
  2909. static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
  2910. struct ieee80211_sta_ht_cap *ht_cap)
  2911. {
  2912. static const __le16 highest[RF_PATH_MAX] = {
  2913. cpu_to_le16(150), cpu_to_le16(300), cpu_to_le16(450), cpu_to_le16(600),
  2914. };
  2915. struct rtw89_hal *hal = &rtwdev->hal;
  2916. u8 nss = hal->rx_nss;
  2917. int i;
  2918. ht_cap->ht_supported = true;
  2919. ht_cap->cap = 0;
  2920. ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
  2921. IEEE80211_HT_CAP_MAX_AMSDU |
  2922. IEEE80211_HT_CAP_TX_STBC |
  2923. (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
  2924. ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING;
  2925. ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
  2926. IEEE80211_HT_CAP_DSSSCCK40 |
  2927. IEEE80211_HT_CAP_SGI_40;
  2928. ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
  2929. ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
  2930. ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
  2931. for (i = 0; i < nss; i++)
  2932. ht_cap->mcs.rx_mask[i] = 0xFF;
  2933. ht_cap->mcs.rx_mask[4] = 0x01;
  2934. ht_cap->mcs.rx_highest = highest[nss - 1];
  2935. }
/* Fill in the VHT capabilities advertised for this device. The MCS maps
 * and highest-rate fields are scaled by the configured TX/RX NSS, and the
 * 160 MHz bits depend on chip support.
 */
static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
			       struct ieee80211_sta_vht_cap *vht_cap)
{
	/* Highest long-GI rates (Mbps) indexed by nss - 1, for 80/160 MHz. */
	static const __le16 highest_bw80[RF_PATH_MAX] = {
		cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733),
	};
	static const __le16 highest_bw160[RF_PATH_MAX] = {
		cpu_to_le16(867), cpu_to_le16(1733), cpu_to_le16(2600), cpu_to_le16(3467),
	};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const __le16 *highest = chip->support_bw160 ? highest_bw160 : highest_bw80;
	struct rtw89_hal *hal = &rtwdev->hal;
	u16 tx_mcs_map = 0, rx_mcs_map = 0;
	/* Beamformee STS capability field value (supports 4 STS: 3 + 1). */
	u8 sts_cap = 3;
	int i;

	/* Two bits per stream: MCS 0-9 for usable streams, else "not
	 * supported".
	 */
	for (i = 0; i < 8; i++) {
		if (i < hal->tx_nss)
			tx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
		else
			tx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
		if (i < hal->rx_nss)
			rx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
		else
			rx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
	}

	vht_cap->vht_supported = true;
	vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
		       IEEE80211_VHT_CAP_SHORT_GI_80 |
		       IEEE80211_VHT_CAP_RXSTBC_1 |
		       IEEE80211_VHT_CAP_HTC_VHT |
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
		       0;
	vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
	vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
			IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
	vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
	if (chip->support_bw160)
		vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
				IEEE80211_VHT_CAP_SHORT_GI_160;
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
	vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
	vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1];
}
  2981. #define RTW89_SBAND_IFTYPES_NR 2
  2982. static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
  2983. enum nl80211_band band,
  2984. struct ieee80211_supported_band *sband)
  2985. {
  2986. const struct rtw89_chip_info *chip = rtwdev->chip;
  2987. struct rtw89_hal *hal = &rtwdev->hal;
  2988. struct ieee80211_sband_iftype_data *iftype_data;
  2989. bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) ||
  2990. (chip->chip_id == RTL8852B && hal->cv == CHIP_CAV);
  2991. u16 mcs_map = 0;
  2992. int i;
  2993. int nss = hal->rx_nss;
  2994. int idx = 0;
  2995. iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
  2996. if (!iftype_data)
  2997. return;
  2998. for (i = 0; i < 8; i++) {
  2999. if (i < nss)
  3000. mcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
  3001. else
  3002. mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
  3003. }
  3004. for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
  3005. struct ieee80211_sta_he_cap *he_cap;
  3006. u8 *mac_cap_info;
  3007. u8 *phy_cap_info;
  3008. switch (i) {
  3009. case NL80211_IFTYPE_STATION:
  3010. case NL80211_IFTYPE_AP:
  3011. break;
  3012. default:
  3013. continue;
  3014. }
  3015. if (idx >= RTW89_SBAND_IFTYPES_NR) {
  3016. rtw89_warn(rtwdev, "run out of iftype_data\n");
  3017. break;
  3018. }
  3019. iftype_data[idx].types_mask = BIT(i);
  3020. he_cap = &iftype_data[idx].he_cap;
  3021. mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
  3022. phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
  3023. he_cap->has_he = true;
  3024. mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
  3025. if (i == NL80211_IFTYPE_STATION)
  3026. mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
  3027. mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
  3028. IEEE80211_HE_MAC_CAP2_BSR;
  3029. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
  3030. mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
  3031. #else
  3032. phy_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2;
  3033. #endif
  3034. if (i == NL80211_IFTYPE_AP)
  3035. mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
  3036. mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
  3037. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
  3038. IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
  3039. #else
  3040. IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU;
  3041. #endif
  3042. if (i == NL80211_IFTYPE_STATION)
  3043. mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
  3044. if (band == NL80211_BAND_2GHZ) {
  3045. phy_cap_info[0] =
  3046. IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
  3047. } else {
  3048. phy_cap_info[0] =
  3049. IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
  3050. if (chip->support_bw160)
  3051. phy_cap_info[0] |= IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
  3052. }
  3053. phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
  3054. IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
  3055. IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
  3056. phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
  3057. IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
  3058. IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
  3059. IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
  3060. phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
  3061. if (i == NL80211_IFTYPE_STATION)
  3062. phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
  3063. IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
  3064. if (i == NL80211_IFTYPE_AP)
  3065. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
  3066. phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
  3067. #else
  3068. phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA;
  3069. #endif
  3070. phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
  3071. IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
  3072. if (chip->support_bw160)
  3073. phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
  3074. phy_cap_info[5] = no_ng16 ? 0 :
  3075. IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
  3076. IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
  3077. #if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
  3078. phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
  3079. IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
  3080. IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
  3081. IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
  3082. phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
  3083. IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
  3084. IEEE80211_HE_PHY_CAP7_MAX_NC_1;
  3085. #else
  3086. phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
  3087. IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
  3088. IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
  3089. IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
  3090. phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
  3091. IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
  3092. IEEE80211_HE_PHY_CAP7_MAX_NC_1;
  3093. #endif
  3094. phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
  3095. IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
  3096. IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
  3097. if (chip->support_bw160)
  3098. phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
  3099. IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
  3100. phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
  3101. IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
  3102. IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
  3103. IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
  3104. #if defined(IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US)
  3105. IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
  3106. #else
  3107. u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
  3108. IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
  3109. #endif
  3110. if (i == NL80211_IFTYPE_STATION)
  3111. phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
  3112. he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
  3113. he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
  3114. if (chip->support_bw160) {
  3115. he_cap->he_mcs_nss_supp.rx_mcs_160 = cpu_to_le16(mcs_map);
  3116. he_cap->he_mcs_nss_supp.tx_mcs_160 = cpu_to_le16(mcs_map);
  3117. }
  3118. if (band == NL80211_BAND_6GHZ) {
  3119. __le16 capa;
  3120. capa = le16_encode_bits(IEEE80211_HT_MPDU_DENSITY_NONE,
  3121. IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
  3122. le16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
  3123. IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
  3124. le16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
  3125. IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
  3126. iftype_data[idx].he_6ghz_capa.capa = capa;
  3127. }
  3128. idx++;
  3129. }
  3130. #if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 7, 0)
  3131. _ieee80211_set_sband_iftype_data(sband, iftype_data, idx);
  3132. #else
  3133. sband->iftype_data = iftype_data;
  3134. sband->n_iftype_data = idx;
  3135. #endif
  3136. }
/* Allocate and install the wiphy supported-band structures for every
 * band the chip advertises, populating HT/VHT/HE capabilities (no HT/VHT
 * on 6 GHz; no VHT on 2.4 GHz).
 *
 * Returns 0 on success, -ENOMEM on allocation failure; any partially
 * installed bands (and their HE iftype_data) are unwound on error.
 */
static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
	struct ieee80211_supported_band *sband_6ghz = NULL;
	u32 size = sizeof(struct ieee80211_supported_band);
	u8 support_bands = rtwdev->chip->support_bands;

	if (support_bands & BIT(NL80211_BAND_2GHZ)) {
		sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
		if (!sband_2ghz)
			goto err;
		rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
		rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
		hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
	}

	if (support_bands & BIT(NL80211_BAND_5GHZ)) {
		sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
		if (!sband_5ghz)
			goto err;
		rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
		rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
		rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
		hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
	}

	if (support_bands & BIT(NL80211_BAND_6GHZ)) {
		sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
		if (!sband_6ghz)
			goto err;
		rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
		hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
	}

	return 0;

err:
	/* Detach first, then free the HE iftype_data allocated by
	 * rtw89_init_he_cap() before freeing the sbands themselves.
	 * kfree(NULL) is a no-op for bands never allocated.
	 */
	hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
	hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
	hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
	if (sband_2ghz)
		kfree((__force void *)sband_2ghz->iftype_data);
	if (sband_5ghz)
		kfree((__force void *)sband_5ghz->iftype_data);
	if (sband_6ghz)
		kfree((__force void *)sband_6ghz->iftype_data);
	kfree(sband_2ghz);
	kfree(sband_5ghz);
	kfree(sband_6ghz);
	return -ENOMEM;
}
  3184. static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
  3185. {
  3186. struct ieee80211_hw *hw = rtwdev->hw;
  3187. if (hw->wiphy->bands[NL80211_BAND_2GHZ])
  3188. kfree((__force void *)hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
  3189. if (hw->wiphy->bands[NL80211_BAND_5GHZ])
  3190. kfree((__force void *)hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
  3191. if (hw->wiphy->bands[NL80211_BAND_6GHZ])
  3192. kfree((__force void *)hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
  3193. kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
  3194. kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
  3195. kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
  3196. hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
  3197. hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
  3198. hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
  3199. }
  3200. static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
  3201. {
  3202. int i;
  3203. for (i = 0; i < RTW89_PHY_MAX; i++)
  3204. skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]);
  3205. for (i = 0; i < RTW89_PHY_MAX; i++)
  3206. rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
  3207. }
  3208. void rtw89_core_update_beacon_work(struct work_struct *work)
  3209. {
  3210. struct rtw89_dev *rtwdev;
  3211. struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
  3212. update_beacon_work);
  3213. if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE)
  3214. return;
  3215. rtwdev = rtwvif->rtwdev;
  3216. mutex_lock(&rtwdev->mutex);
  3217. rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
  3218. mutex_unlock(&rtwdev->mutex);
  3219. }
  3220. int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
  3221. {
  3222. struct completion *cmpl = &wait->completion;
  3223. unsigned long timeout;
  3224. unsigned int cur;
  3225. cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
  3226. if (cur != RTW89_WAIT_COND_IDLE)
  3227. return -EBUSY;
  3228. timeout = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
  3229. if (timeout == 0) {
  3230. atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
  3231. return -ETIMEDOUT;
  3232. }
  3233. if (wait->data.err)
  3234. return -EFAULT;
  3235. return 0;
  3236. }
  3237. void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond,
  3238. const struct rtw89_completion_data *data)
  3239. {
  3240. unsigned int cur;
  3241. cur = atomic_cmpxchg(&wait->cond, cond, RTW89_WAIT_COND_IDLE);
  3242. if (cur != cond)
  3243. return;
  3244. wait->data = *data;
  3245. complete(&wait->completion);
  3246. }
  3247. void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event)
  3248. {
  3249. u16 bt_req_len;
  3250. switch (event) {
  3251. case RTW89_BTC_HMSG_SET_BT_REQ_SLOT:
  3252. bt_req_len = rtw89_coex_query_bt_req_len(rtwdev, RTW89_PHY_0);
  3253. rtw89_debug(rtwdev, RTW89_DBG_BTC,
  3254. "coex updates BT req len to %d TU\n", bt_req_len);
  3255. rtw89_queue_chanctx_change(rtwdev, RTW89_CHANCTX_BT_SLOT_CHANGE);
  3256. break;
  3257. default:
  3258. if (event < NUM_OF_RTW89_BTC_HMSG)
  3259. rtw89_debug(rtwdev, RTW89_DBG_BTC,
  3260. "unhandled BTC HMSG event: %d\n", event);
  3261. else
  3262. rtw89_warn(rtwdev,
  3263. "unrecognized BTC HMSG event: %d\n", event);
  3264. break;
  3265. }
  3266. }
/* Bring the device fully up: MAC init, BB/RF bring-up, coex and PHY
 * dynamic mechanisms, then HCI start and the periodic track work.
 *
 * Returns 0 on success or a negative error code from a failed init step.
 */
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
	int ret;

	rtwdev->mac.qta_mode = RTW89_QTA_SCC;
	ret = rtw89_mac_init(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
		return ret;
	}

	rtw89_btc_ntfy_poweron(rtwdev);

	/* efuse process */

	/* pre-config BB/RF, BB reset/RFC reset */
	ret = rtw89_chip_disable_bb_rf(rtwdev);
	if (ret)
		return ret;
	ret = rtw89_chip_enable_bb_rf(rtwdev);
	if (ret)
		return ret;

	rtw89_phy_init_bb_reg(rtwdev);
	rtw89_phy_init_rf_reg(rtwdev, false);

	rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);

	rtw89_phy_dm_init(rtwdev);

	rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
	rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
	rtw89_tas_reset(rtwdev);
#endif

	ret = rtw89_hci_start(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to start hci\n");
		return ret;
	}

	ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
				     RTW89_TRACK_WORK_PERIOD);

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);

	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
	rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.log.enable);
	rtw89_fw_h2c_init_ba_cam(rtwdev);

	return 0;
}
/* Shut the device down: cancel all pending work, flush TX, then stop HCI
 * and power off the MAC. Safe to call repeatedly; exits early if the
 * device is not running.
 */
void rtw89_core_stop(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;

	/* Prevent stopping twice; enter_ips and ops_stop */
	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);

	/* NOTE(review): the mutex is dropped around the cancel_*_sync()
	 * calls below, presumably because those works take rtwdev->mutex
	 * themselves and would deadlock otherwise -- confirm before
	 * reordering.
	 */
	mutex_unlock(&rtwdev->mutex);

	cancel_work_sync(&rtwdev->c2h_work);
	cancel_work_sync(&rtwdev->cancel_6ghz_probe_work);
	cancel_work_sync(&btc->eapol_notify_work);
	cancel_work_sync(&btc->arp_notify_work);
	cancel_work_sync(&btc->dhcp_notify_work);
	cancel_work_sync(&btc->icmp_notify_work);
	cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
	cancel_delayed_work_sync(&rtwdev->track_work);
	cancel_delayed_work_sync(&rtwdev->chanctx_work);
	cancel_delayed_work_sync(&rtwdev->coex_act1_work);
	cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
	cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
	cancel_delayed_work_sync(&rtwdev->cfo_track_work);
	cancel_delayed_work_sync(&rtwdev->forbid_ba_work);
	cancel_delayed_work_sync(&rtwdev->antdiv_work);

	mutex_lock(&rtwdev->mutex);

	rtw89_btc_ntfy_poweroff(rtwdev);
	/* Flush every hardware queue before tearing down the transport. */
	rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
	rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
	rtw89_hci_stop(rtwdev);
	rtw89_hci_deinit(rtwdev);
	rtw89_mac_pwr_off(rtwdev);
	rtw89_hci_reset(rtwdev);
}
/* One-time software initialization of a freshly allocated rtw89_dev:
 * lists, work items, locks, wait objects and the TX workqueue. Also
 * kicks off asynchronous firmware loading.
 *
 * Returns 0 on success, -ENOMEM if the TX workqueue cannot be created.
 */
int rtw89_core_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	u8 band;

	INIT_LIST_HEAD(&rtwdev->ba_list);
	INIT_LIST_HEAD(&rtwdev->forbid_ba_list);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
	INIT_LIST_HEAD(&rtwdev->early_h2c_list);
	/* Scan packet lists exist only for bands the chip supports. */
	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!(rtwdev->chip->support_bands & BIT(band)))
			continue;
		INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
	}
	INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
	INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
	INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
	INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work);
	INIT_DELAYED_WORK(&rtwdev->chanctx_work, rtw89_chanctx_work);
	INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
	INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
	INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
	INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
	INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
	INIT_DELAYED_WORK(&rtwdev->antdiv_work, rtw89_phy_antdiv_work);
	rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!rtwdev->txq_wq)
		return -ENOMEM;
	spin_lock_init(&rtwdev->ba_lock);
	spin_lock_init(&rtwdev->rpwm_lock);
	mutex_init(&rtwdev->mutex);
	mutex_init(&rtwdev->rf_mutex);
	rtwdev->total_sta_assoc = 0;

	rtw89_init_wait(&rtwdev->mcc.wait);
	rtw89_init_wait(&rtwdev->mac.fw_ofld_wait);

	INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
	INIT_WORK(&rtwdev->ips_work, rtw89_ips_work);
	INIT_WORK(&rtwdev->load_firmware_work, rtw89_load_firmware_work);
	INIT_WORK(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work);

	skb_queue_head_init(&rtwdev->c2h_queue);
	rtw89_core_ppdu_sts_init(rtwdev);
	rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);

	rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;

	INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
	INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
	INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
	INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);

	init_completion(&rtwdev->fw.req.completion);

	/* Firmware is loaded asynchronously; rtw89_core_deinit() unloads. */
	schedule_work(&rtwdev->load_firmware_work);

	rtw89_ser_init(rtwdev);
	rtw89_entity_init(rtwdev);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0))
	rtw89_tas_init(rtwdev);
#endif

	return 0;
}
EXPORT_SYMBOL(rtw89_core_init);
/* Reverse of rtw89_core_init(): stop SER, release firmware and any queued
 * early H2C buffers, destroy the TX workqueue and the mutexes.
 */
void rtw89_core_deinit(struct rtw89_dev *rtwdev)
{
	rtw89_ser_deinit(rtwdev);
	rtw89_unload_firmware(rtwdev);
	rtw89_fw_free_all_early_h2c(rtwdev);

	destroy_workqueue(rtwdev->txq_wq);
	mutex_destroy(&rtwdev->rf_mutex);
	mutex_destroy(&rtwdev->mutex);
}
EXPORT_SYMBOL(rtw89_core_deinit);
/* Prepare the device for a scan: leave power-save states, switch the vif
 * to the scan MAC address (programmed into the address CAM), and notify
 * coex/RFK/interrupt-mitigation/EDCCA of scan mode.
 */
void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   const u8 *mac_addr, bool hw_scan)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);

	rtwdev->scanning = true;
	rtw89_leave_lps(rtwdev);
	if (hw_scan)
		rtw89_leave_ips_by_hwflags(rtwdev);

	ether_addr_copy(rtwvif->mac_addr, mac_addr);
	rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, chan->band_type);
	rtw89_chip_rfk_scan(rtwdev, true);
	rtw89_hci_recalc_int_mit(rtwdev);
	rtw89_phy_config_edcca(rtwdev, true);

	rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, mac_addr);
}
/* Restore normal operation after a scan: put the interface MAC address
 * back into the CAM, undo scan-time RFK/coex/EDCCA settings, and queue
 * IPS work if the device is idle after a HW scan.
 */
void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
			      struct ieee80211_vif *vif, bool hw_scan)
{
	struct rtw89_vif *rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;

	if (!rtwvif)
		return;

	/* Restore the real interface address used during the scan. */
	ether_addr_copy(rtwvif->mac_addr, vif->addr);
	rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);

	rtw89_chip_rfk_scan(rtwdev, false);
	rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
	rtw89_phy_config_edcca(rtwdev, false);

	rtwdev->scanning = false;
	/* Skip one DIG round right after scanning. */
	rtwdev->dig.bypass_dig = true;
	if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE))
		ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
}
/* Read the chip cut version (CV) from hardware and cache it in hal.cv.
 *
 * On 8852A the CV field alone cannot distinguish early cuts: a read of
 * R_AX_GPIO0_7_FUNC_SEL coming back as RTW89_R32_DEAD identifies CAV,
 * otherwise CBV is assumed.  8852B/8851B additionally expose an analog
 * cut version (ACV) via the XTAL SI interface, cached in hal.acv.
 */
static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;
	u8 val;
	u8 cv;

	cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
	if (chip->chip_id == RTL8852A && cv <= CHIP_CBV) {
		/* dead readback distinguishes CAV from CBV */
		if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
			cv = CHIP_CAV;
		else
			cv = CHIP_CBV;
	}

	rtwdev->hal.cv = cv;

	if (chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) {
		/* best effort: hal.acv is left untouched if the read fails */
		ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
		if (ret)
			return;

		rtwdev->hal.acv = u8_get_bits(val, XTAL_SI_ACV_MASK);
	}
}
  3459. static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
  3460. {
  3461. rtwdev->hal.support_cckpd =
  3462. !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
  3463. !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
  3464. rtwdev->hal.support_igi =
  3465. rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV;
  3466. }
  3467. static void rtw89_core_setup_rfe_parms(struct rtw89_dev *rtwdev)
  3468. {
  3469. const struct rtw89_chip_info *chip = rtwdev->chip;
  3470. const struct rtw89_rfe_parms_conf *conf = chip->rfe_parms_conf;
  3471. struct rtw89_efuse *efuse = &rtwdev->efuse;
  3472. const struct rtw89_rfe_parms *sel;
  3473. u8 rfe_type = efuse->rfe_type;
  3474. if (!conf) {
  3475. sel = chip->dflt_parms;
  3476. goto out;
  3477. }
  3478. while (conf->rfe_parms) {
  3479. if (rfe_type == conf->rfe_type) {
  3480. sel = conf->rfe_parms;
  3481. goto out;
  3482. }
  3483. conf++;
  3484. }
  3485. sel = chip->dflt_parms;
  3486. out:
  3487. rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, sel);
  3488. rtw89_load_txpwr_table(rtwdev, rtwdev->rfe_parms->byr_tbl);
  3489. }
/* Power the MAC up just enough to read the efuse and PHY capability
 * maps, derive driver-side capability flags from them, then power the
 * MAC back off.  Returns 0 or a negative errno from any failing step.
 *
 * NOTE(review): error paths return without powering the MAC off —
 * confirm this is intended (callers may rely on later re-init).
 */
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = rtw89_mac_partial_init(rtwdev, false);
	if (ret)
		return ret;

	ret = rtw89_parse_efuse_map(rtwdev);
	if (ret)
		return ret;

	ret = rtw89_parse_phycap_map(rtwdev);
	if (ret)
		return ret;

	ret = rtw89_mac_setup_phycap(rtwdev);
	if (ret)
		return ret;

	rtw89_core_setup_phycap(rtwdev);

	rtw89_mac_pwr_off(rtwdev);

	return 0;
}
/* Board-level setup; currently only FEM detection.  Always returns 0,
 * but keeps an int return for symmetry with the other setup steps.
 */
static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev)
{
	rtw89_chip_fem_setup(rtwdev);

	return 0;
}
  3514. int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
  3515. {
  3516. int ret;
  3517. rtw89_read_chip_ver(rtwdev);
  3518. ret = rtw89_wait_firmware_completion(rtwdev);
  3519. if (ret) {
  3520. rtw89_err(rtwdev, "failed to wait firmware completion\n");
  3521. return ret;
  3522. }
  3523. ret = rtw89_fw_recognize(rtwdev);
  3524. if (ret) {
  3525. rtw89_err(rtwdev, "failed to recognize firmware\n");
  3526. return ret;
  3527. }
  3528. ret = rtw89_chip_efuse_info_setup(rtwdev);
  3529. if (ret)
  3530. return ret;
  3531. ret = rtw89_fw_recognize_elements(rtwdev);
  3532. if (ret) {
  3533. rtw89_err(rtwdev, "failed to recognize firmware elements\n");
  3534. return ret;
  3535. }
  3536. ret = rtw89_chip_board_info_setup(rtwdev);
  3537. if (ret)
  3538. return ret;
  3539. rtw89_core_setup_rfe_parms(rtwdev);
  3540. rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
  3541. return 0;
  3542. }
  3543. EXPORT_SYMBOL(rtw89_chip_info_setup);
/* Populate mac80211 hw/wiphy capabilities from chip data and register
 * the device with mac80211 and the regulatory core.
 *
 * Returns 0 on success; on failure, unwinds whatever was registered.
 */
static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_hal *hal = &rtwdev->hal;
	int ret;
	int tx_headroom = IEEE80211_HT_CTL_LEN;

	/* per-object private data sizes mac80211 allocates for us */
	hw->vif_data_size = sizeof(struct rtw89_vif);
	hw->sta_data_size = sizeof(struct rtw89_sta);
	hw->txq_data_size = sizeof(struct rtw89_txq);
	hw->chanctx_data_size = sizeof(struct rtw89_chanctx_cfg);

	SET_IEEE80211_PERM_ADDR(hw, efuse->addr);

	hw->extra_tx_headroom = tx_headroom;
	hw->queues = IEEE80211_NUM_ACS;
	hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
	hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, RX_INCLUDES_FCS);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_PS);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
#endif
	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
	ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);

	/* ref: description of rtw89_mcc_get_tbtt_ofst() in chan.c */
	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);

	/* connection monitoring can be offloaded only with FW beacon filter */
	if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw))
		ieee80211_hw_set(hw, CONNECTION_MONITOR);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				     BIT(NL80211_IFTYPE_AP) |
				     BIT(NL80211_IFTYPE_P2P_CLIENT) |
				     BIT(NL80211_IFTYPE_P2P_GO);

	if (hal->ant_diversity) {
		/* diversity hardware advertises two selectable antennas */
		hw->wiphy->available_antennas_tx = 0x3;
		hw->wiphy->available_antennas_rx = 0x3;
	} else {
		/* one bit per RF path */
		hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
		hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
	}

	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
			    WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
			    WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_SPLIT_SCAN_6GHZ;
#else
			    WIPHY_FLAG_AP_UAPSD;
#endif
	hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;

	hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
	hw->wiphy->max_scan_ie_len = RTW89_SCANOFLD_MAX_IE_LEN;

#ifdef CONFIG_PM
	hw->wiphy->wowlan = rtwdev->chip->wowlan_stub;
#endif

	hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
	hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
	hw->wiphy->tid_config_support.vif |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
	hw->wiphy->tid_config_support.peer |= BIT(NL80211_TID_CONFIG_ATTR_AMSDU_CTRL);
	hw->wiphy->max_remain_on_channel_duration = 1000;

	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);

	ret = rtw89_core_set_supported_band(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to set supported band\n");
		return ret;
	}

	ret = rtw89_regd_setup(rtwdev);
	if (ret) {
		rtw89_err(rtwdev, "failed to set up regd\n");
		goto err_free_supported_band;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 11, 0)
	hw->wiphy->sar_capa = &rtw89_sar_capa;
#endif

	ret = ieee80211_register_hw(hw);
	if (ret) {
		rtw89_err(rtwdev, "failed to register hw\n");
		goto err_free_supported_band;
	}

	ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier);
	if (ret) {
		rtw89_err(rtwdev, "failed to init regd\n");
		goto err_unregister_hw;
	}

	return 0;

err_unregister_hw:
	ieee80211_unregister_hw(hw);
err_free_supported_band:
	rtw89_core_clr_supported_band(rtwdev);

	return ret;
}
  3641. static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
  3642. {
  3643. struct ieee80211_hw *hw = rtwdev->hw;
  3644. ieee80211_unregister_hw(hw);
  3645. rtw89_core_clr_supported_band(rtwdev);
  3646. }
  3647. int rtw89_core_register(struct rtw89_dev *rtwdev)
  3648. {
  3649. int ret;
  3650. ret = rtw89_core_register_hw(rtwdev);
  3651. if (ret) {
  3652. rtw89_err(rtwdev, "failed to register core hw\n");
  3653. return ret;
  3654. }
  3655. rtw89_debugfs_init(rtwdev);
  3656. return 0;
  3657. }
  3658. EXPORT_SYMBOL(rtw89_core_register);
/* Counterpart of rtw89_core_register(). */
void rtw89_core_unregister(struct rtw89_dev *rtwdev)
{
	rtw89_core_unregister_hw(rtwdev);
}
EXPORT_SYMBOL(rtw89_core_unregister);
/* Allocate the ieee80211_hw plus driver and bus private data, and probe
 * the firmware early to decide whether channel-context ops can be
 * offered to mac80211.
 *
 * @bus_data_size: extra bytes the bus layer (pci/usb/sdio) needs in priv
 *
 * Returns the new rtw89_dev, or NULL on allocation failure.  Ownership
 * of @firmware and @ops passes to the rtw89_dev on success; both are
 * released here on failure.
 */
struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
					   u32 bus_data_size,
					   const struct rtw89_chip_info *chip)
{
	struct rtw89_fw_info early_fw = {};
	const struct firmware *firmware;
	struct ieee80211_hw *hw;
	struct rtw89_dev *rtwdev;
	struct ieee80211_ops *ops;
	u32 driver_data_size;
	int fw_format = -1;
	bool no_chanctx;

	/* peek at the firmware features before allocating the hw */
	firmware = rtw89_early_fw_feature_recognize(device, chip, &early_fw, &fw_format);

	/* copy the ops template so chanctx hooks can be stripped per device */
	ops = kmemdup(&rtw89_ops, sizeof(rtw89_ops), GFP_KERNEL);
	if (!ops)
		goto err;

	no_chanctx = chip->support_chanctx_num == 0 ||
		     !RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &early_fw) ||
		     !RTW89_CHK_FW_FEATURE(BEACON_FILTER, &early_fw);

	if (no_chanctx) {
		/* hide chanctx/ROC ops so mac80211 uses the non-chanctx path */
		ops->add_chanctx = NULL;
		ops->remove_chanctx = NULL;
		ops->change_chanctx = NULL;
		ops->assign_vif_chanctx = NULL;
		ops->unassign_vif_chanctx = NULL;
		ops->remain_on_channel = NULL;
		ops->cancel_remain_on_channel = NULL;
	}

	driver_data_size = sizeof(struct rtw89_dev) + bus_data_size;
	hw = ieee80211_alloc_hw(driver_data_size, ops);
	if (!hw)
		goto err;

	hw->wiphy->iface_combinations = rtw89_iface_combs;

	if (no_chanctx || chip->support_chanctx_num == 1)
		hw->wiphy->n_iface_combinations = 1;
	else
		hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw89_iface_combs);

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = device;
	rtwdev->ops = ops;
	rtwdev->chip = chip;
	rtwdev->fw.req.firmware = firmware;
	rtwdev->fw.fw_format = fw_format;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "probe driver %s chanctx\n",
		    no_chanctx ? "without" : "with");

	return rtwdev;

err:
	/* kfree(NULL)/release_firmware(NULL) are safe no-ops */
	kfree(ops);
	release_firmware(firmware);
	return NULL;
}
EXPORT_SYMBOL(rtw89_alloc_ieee80211_hw);
/* Release everything allocated by rtw89_alloc_ieee80211_hw().  The hw
 * must be freed last: rtwdev lives inside its priv area, so the other
 * pointers are read before ieee80211_free_hw().
 */
void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev)
{
	kfree(rtwdev->ops);
	kfree(rtwdev->rfe_data);
	release_firmware(rtwdev->fw.req.firmware);
	ieee80211_free_hw(rtwdev->hw);
}
EXPORT_SYMBOL(rtw89_free_ieee80211_hw);
  3725. MODULE_AUTHOR("Realtek Corporation");
  3726. MODULE_DESCRIPTION("Realtek 802.11ax wireless core module");
  3727. MODULE_LICENSE("Dual BSD/GPL");