/* ixgbe_main.c */
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_macvlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                        "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
                        "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
                        "Intel(R) 10 Gigabit Network Connection";
#endif
#define DRV_VERSION "4.4.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
                        "Copyright (c) 1999-2016 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598]           = &ixgbe_82598_info,
        [board_82599]           = &ixgbe_82599_info,
        [board_X540]            = &ixgbe_X540_info,
        [board_X550]            = &ixgbe_X550_info,
        [board_X550EM_x]        = &ixgbe_X550EM_x_info,
        [board_x550em_a]        = &ixgbe_x550em_a_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
                 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
                 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *ixgbe_wq;

static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
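
/* Read a word from the PCIe capability block of the parent device.  This
 * is needed when the MAC sits behind an internal switch, so its own config
 * space does not reflect the negotiated link.  Returns 0 on success, -1 if
 * there is no PCIe parent or the parent was hot-removed mid-read.
 */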
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                                          u32 reg, u16 *value)
{
        struct pci_dev *parent_dev;
        struct pci_bus *parent_bus;

        parent_bus = adapter->pdev->bus->parent;
        if (!parent_bus)
                return -1;

        parent_dev = parent_bus->self;
        if (!parent_dev)
                return -1;

        if (!pci_is_pcie(parent_dev))
                return -1;

        pcie_capability_read_word(parent_dev, reg, value);
        if (*value == IXGBE_FAILED_READ_CFG_WORD &&
            ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
                return -1;
        return 0;
}
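
/* Cache the negotiated PCIe link width and speed of the parent bus in
 * hw->bus.  The literal 18 passed below is PCI_EXP_LNKSTA, the offset of
 * the Link Status register within the PCIe capability structure.
 */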
static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u16 link_status = 0;
        int err;

        hw->bus.type = ixgbe_bus_type_pci_express;

        /* Get the negotiated link width and speed from PCI config space of the
         * parent, as this device is behind a switch
         */
        err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status);

        /* assume caller will handle error case */
        if (err)
                return err;

        hw->bus.width = ixgbe_convert_bus_width(link_status);
        hw->bus.speed = ixgbe_convert_bus_speed(link_status);

        return 0;
}
/**
 * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
 * @hw: hw specific details
 *
 * This function is used by probe to determine whether a device's PCI-Express
 * bandwidth details should be gathered from the parent bus instead of from the
 * device. Used to ensure that various locations all have the correct device ID
 * checks.
 */
static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
{
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82599_SFP_SF_QP:
        case IXGBE_DEV_ID_82599_QSFP_SF_QP:
                return true;
        default:
                return false;
        }
}
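
/* Compare the usable PCIe bandwidth of the slot against what the NIC needs
 * and warn if the slot is the bottleneck.  Usable bandwidth is per-lane
 * rate times lane count, derated for line encoding: e.g. a 5.0GT/s x8 link
 * carries 5.0 * 0.8 * 8 = 32 GT/s of payload after 8b/10b overhead, which
 * is what the max_gts arithmetic below computes.
 */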
static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
                                     int expected_gts)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int max_gts = 0;
        enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
        struct pci_dev *pdev;

        /* Some devices are not connected over PCIe and thus do not negotiate
         * speed. These devices do not have valid bus info, and thus any report
         * we generate may not be correct.
         */
        if (hw->bus.type == ixgbe_bus_type_internal)
                return;

        /* determine whether to use the parent device */
        if (ixgbe_pcie_from_parent(&adapter->hw))
                pdev = adapter->pdev->bus->parent->self;
        else
                pdev = adapter->pdev;

        if (pcie_get_minimum_link(pdev, &speed, &width) ||
            speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
                e_dev_warn("Unable to determine PCI Express bandwidth.\n");
                return;
        }

        switch (speed) {
        case PCIE_SPEED_2_5GT:
                /* 8b/10b encoding reduces max throughput by 20% */
                max_gts = 2 * width;
                break;
        case PCIE_SPEED_5_0GT:
                /* 8b/10b encoding reduces max throughput by 20% */
                max_gts = 4 * width;
                break;
        case PCIE_SPEED_8_0GT:
                /* 128b/130b encoding reduces throughput by less than 2% */
                max_gts = 8 * width;
                break;
        default:
                e_dev_warn("Unable to determine PCI Express bandwidth.\n");
                return;
        }

        e_dev_info("PCI Express bandwidth of %dGT/s available\n",
                   max_gts);
        e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
                   (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
                    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
                    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
                    "Unknown"),
                   width,
                   (speed == PCIE_SPEED_2_5GT ? "20%" :
                    speed == PCIE_SPEED_5_0GT ? "20%" :
                    speed == PCIE_SPEED_8_0GT ? "<2%" :
                    "Unknown"));

        if (max_gts < expected_gts) {
                e_dev_warn("This is not sufficient for optimal performance of this card.\n");
                e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
                           expected_gts);
                e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
        }
}
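
/* Queue the service task unless the adapter is going down, has been
 * removed, or a run is already pending; test_and_set_bit() makes the
 * "already scheduled" check and the flag update a single atomic step.
 */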
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
        if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
            !test_bit(__IXGBE_REMOVING, &adapter->state) &&
            !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
                queue_work(ixgbe_wq, &adapter->service_task);
}
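
/* Mark the adapter as gone after a failed MMIO read: clearing hw_addr
 * makes every later register access short-circuit, and the service task
 * is kicked once so teardown can proceed.
 */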
static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        e_dev_err("Adapter removed\n");
        if (test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
                ixgbe_service_event_schedule(adapter);
}
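
/* Decide whether an all-ones read means surprise removal by re-reading the
 * STATUS register, which should never legitimately read as all ones.
 */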
static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_STATUS) {
                ixgbe_remove_adapter(hw);
                return;
        }
        value = ixgbe_read_reg(hw, IXGBE_STATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbe_remove_adapter(hw);
}
/**
 * ixgbe_read_reg - Read from device register
 * @hw: hw specific details
 * @reg: offset of register to read
 *
 * Returns : value read or IXGBE_FAILED_READ_REG if removed
 *
 * This function is used to read device registers.  A read that returns all
 * ones may mean the device has been removed, so it is confirmed by checking
 * whether the status register also reads as all ones.  If a removal was
 * detected previously, the hardware is not read at all and
 * IXGBE_FAILED_READ_REG (all ones) is returned directly.
 */
u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (ixgbe_removed(reg_addr))
                return IXGBE_FAILED_READ_REG;
        if (unlikely(hw->phy.nw_mng_if_sel &
                     IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
                struct ixgbe_adapter *adapter;
                int i;

                for (i = 0; i < 200; ++i) {
                        value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
                        if (likely(!value))
                                goto writes_completed;
                        if (value == IXGBE_FAILED_READ_REG) {
                                ixgbe_remove_adapter(hw);
                                return IXGBE_FAILED_READ_REG;
                        }
                        udelay(5);
                }

                adapter = hw->back;
                e_warn(hw, "register writes incomplete %08x\n", value);
        }

writes_completed:
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbe_check_remove(hw, reg);
        return value;
}
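
/* Config-space analogue of the MMIO removal check: an all-ones vendor ID
 * read means the device has dropped off the bus.
 */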
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
        u16 value;

        pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
        if (value == IXGBE_FAILED_READ_CFG_WORD) {
                ixgbe_remove_adapter(hw);
                return true;
        }
        return false;
}
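
/* Read a word from our own config space, returning the all-ones failure
 * pattern if the device is already known to be removed or vanishes during
 * the read.  The dword variant below (CONFIG_PCI_IOV only) is identical
 * in structure.
 */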
u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
{
        struct ixgbe_adapter *adapter = hw->back;
        u16 value;

        if (ixgbe_removed(hw->hw_addr))
                return IXGBE_FAILED_READ_CFG_WORD;
        pci_read_config_word(adapter->pdev, reg, &value);
        if (value == IXGBE_FAILED_READ_CFG_WORD &&
            ixgbe_check_cfg_remove(hw, adapter->pdev))
                return IXGBE_FAILED_READ_CFG_WORD;
        return value;
}

#ifdef CONFIG_PCI_IOV
static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
{
        struct ixgbe_adapter *adapter = hw->back;
        u32 value;

        if (ixgbe_removed(hw->hw_addr))
                return IXGBE_FAILED_READ_CFG_DWORD;
        pci_read_config_dword(adapter->pdev, reg, &value);
        if (value == IXGBE_FAILED_READ_CFG_DWORD &&
            ixgbe_check_cfg_remove(hw, adapter->pdev))
                return IXGBE_FAILED_READ_CFG_DWORD;
        return value;
}
#endif /* CONFIG_PCI_IOV */

void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
{
        struct ixgbe_adapter *adapter = hw->back;

        if (ixgbe_removed(hw->hw_addr))
                return;
        pci_write_config_word(adapter->pdev, reg, value);
}
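
/* Counterpart to ixgbe_service_event_schedule(): clear the SCHED bit when
 * the service task finishes.  The barrier orders the task's memory writes
 * before the bit clear so a re-schedule sees consistent state.
 */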
  396. static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
  397. {
  398. BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
  399. /* flush memory to make sure state is correct before next watchdog */
  400. smp_mb__before_atomic();
  401. clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
  402. }
  403. struct ixgbe_reg_info {
  404. u32 ofs;
  405. char *name;
  406. };
  407. static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
  408. /* General Registers */
  409. {IXGBE_CTRL, "CTRL"},
  410. {IXGBE_STATUS, "STATUS"},
  411. {IXGBE_CTRL_EXT, "CTRL_EXT"},
  412. /* Interrupt Registers */
  413. {IXGBE_EICR, "EICR"},
  414. /* RX Registers */
  415. {IXGBE_SRRCTL(0), "SRRCTL"},
  416. {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
  417. {IXGBE_RDLEN(0), "RDLEN"},
  418. {IXGBE_RDH(0), "RDH"},
  419. {IXGBE_RDT(0), "RDT"},
  420. {IXGBE_RXDCTL(0), "RXDCTL"},
  421. {IXGBE_RDBAL(0), "RDBAL"},
  422. {IXGBE_RDBAH(0), "RDBAH"},
  423. /* TX Registers */
  424. {IXGBE_TDBAL(0), "TDBAL"},
  425. {IXGBE_TDBAH(0), "TDBAH"},
  426. {IXGBE_TDLEN(0), "TDLEN"},
  427. {IXGBE_TDH(0), "TDH"},
  428. {IXGBE_TDT(0), "TDT"},
  429. {IXGBE_TXDCTL(0), "TXDCTL"},
  430. /* List Terminator */
  431. { .name = NULL }
  432. };
  433. /*
  434. * ixgbe_regdump - register printout routine
  435. */
  436. static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
  437. {
  438. int i = 0, j = 0;
  439. char rname[16];
  440. u32 regs[64];
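        /* per-queue registers are dumped for all 64 possible queues; any
         * offset not handled in the switch below is a single register and
         * is printed directly
         */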
  441. switch (reginfo->ofs) {
  442. case IXGBE_SRRCTL(0):
  443. for (i = 0; i < 64; i++)
  444. regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
  445. break;
  446. case IXGBE_DCA_RXCTRL(0):
  447. for (i = 0; i < 64; i++)
  448. regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
  449. break;
  450. case IXGBE_RDLEN(0):
  451. for (i = 0; i < 64; i++)
  452. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
  453. break;
  454. case IXGBE_RDH(0):
  455. for (i = 0; i < 64; i++)
  456. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
  457. break;
  458. case IXGBE_RDT(0):
  459. for (i = 0; i < 64; i++)
  460. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
  461. break;
  462. case IXGBE_RXDCTL(0):
  463. for (i = 0; i < 64; i++)
  464. regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
  465. break;
  466. case IXGBE_RDBAL(0):
  467. for (i = 0; i < 64; i++)
  468. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
  469. break;
  470. case IXGBE_RDBAH(0):
  471. for (i = 0; i < 64; i++)
  472. regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
  473. break;
  474. case IXGBE_TDBAL(0):
  475. for (i = 0; i < 64; i++)
  476. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
  477. break;
  478. case IXGBE_TDBAH(0):
  479. for (i = 0; i < 64; i++)
  480. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
  481. break;
  482. case IXGBE_TDLEN(0):
  483. for (i = 0; i < 64; i++)
  484. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
  485. break;
  486. case IXGBE_TDH(0):
  487. for (i = 0; i < 64; i++)
  488. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
  489. break;
  490. case IXGBE_TDT(0):
  491. for (i = 0; i < 64; i++)
  492. regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
  493. break;
  494. case IXGBE_TXDCTL(0):
  495. for (i = 0; i < 64; i++)
  496. regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
  497. break;
  498. default:
  499. pr_info("%-15s %08x\n", reginfo->name,
  500. IXGBE_READ_REG(hw, reginfo->ofs));
  501. return;
  502. }
  503. for (i = 0; i < 8; i++) {
  504. snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
  505. pr_err("%-15s", rname);
  506. for (j = 0; j < 8; j++)
  507. pr_cont(" %08x", regs[i*8+j]);
  508. pr_cont("\n");
  509. }
  510. }
  511. /*
  512. * ixgbe_dump - Print registers, tx-rings and rx-rings
  513. */
  514. static void ixgbe_dump(struct ixgbe_adapter *adapter)
  515. {
  516. struct net_device *netdev = adapter->netdev;
  517. struct ixgbe_hw *hw = &adapter->hw;
  518. struct ixgbe_reg_info *reginfo;
  519. int n = 0;
  520. struct ixgbe_ring *tx_ring;
  521. struct ixgbe_tx_buffer *tx_buffer;
  522. union ixgbe_adv_tx_desc *tx_desc;
  523. struct my_u0 { u64 a; u64 b; } *u0;
  524. struct ixgbe_ring *rx_ring;
  525. union ixgbe_adv_rx_desc *rx_desc;
  526. struct ixgbe_rx_buffer *rx_buffer_info;
  527. u32 staterr;
  528. int i = 0;
  529. if (!netif_msg_hw(adapter))
  530. return;
  531. /* Print netdevice Info */
  532. if (netdev) {
  533. dev_info(&adapter->pdev->dev, "Net device Info\n");
  534. pr_info("Device Name state "
  535. "trans_start last_rx\n");
  536. pr_info("%-15s %016lX %016lX %016lX\n",
  537. netdev->name,
  538. netdev->state,
  539. dev_trans_start(netdev),
  540. netdev->last_rx);
  541. }
  542. /* Print Registers */
  543. dev_info(&adapter->pdev->dev, "Register Dump\n");
  544. pr_info(" Register Name Value\n");
  545. for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
  546. reginfo->name; reginfo++) {
  547. ixgbe_regdump(hw, reginfo);
  548. }
  549. /* Print TX Ring Summary */
  550. if (!netdev || !netif_running(netdev))
  551. return;
  552. dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
  553. pr_info(" %s %s %s %s\n",
  554. "Queue [NTU] [NTC] [bi(ntc)->dma ]",
  555. "leng", "ntw", "timestamp");
  556. for (n = 0; n < adapter->num_tx_queues; n++) {
  557. tx_ring = adapter->tx_ring[n];
  558. tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
  559. pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
  560. n, tx_ring->next_to_use, tx_ring->next_to_clean,
  561. (u64)dma_unmap_addr(tx_buffer, dma),
  562. dma_unmap_len(tx_buffer, len),
  563. tx_buffer->next_to_watch,
  564. (u64)tx_buffer->time_stamp);
  565. }
  566. /* Print TX Rings */
  567. if (!netif_msg_tx_done(adapter))
  568. goto rx_ring_summary;
  569. dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
        /* Transmit Descriptor Formats
         *
         * 82598 Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |  PAYLEN  | POPTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN  |
         *   +--------------------------------------------------------------+
         *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
         *
         * 82598 Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |          NXTSEQ           |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         *
         * 82599+ Advanced Transmit Descriptor
         *   +--------------------------------------------------------------+
         * 0 |         Buffer Address [63:0]                                |
         *   +--------------------------------------------------------------+
         * 8 |PAYLEN  |POPTS|CC|IDX  |STA  |DCMD  |DTYP |MAC  |RSV  |DTALEN |
         *   +--------------------------------------------------------------+
         *   63     46 45 40 39 38 36 35 32 31  24 23 20 19 18 17 16 15     0
         *
         * 82599+ Advanced Transmit Descriptor (Write-Back Format)
         *   +--------------------------------------------------------------+
         * 0 |                          RSV [63:0]                          |
         *   +--------------------------------------------------------------+
         * 8 |            RSV           |  STA  |           RSV             |
         *   +--------------------------------------------------------------+
         *   63                       36 35   32 31                         0
         */
  604. for (n = 0; n < adapter->num_tx_queues; n++) {
  605. tx_ring = adapter->tx_ring[n];
  606. pr_info("------------------------------------\n");
  607. pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
  608. pr_info("------------------------------------\n");
  609. pr_info("%s%s %s %s %s %s\n",
  610. "T [desc] [address 63:0 ] ",
  611. "[PlPOIdStDDt Ln] [bi->dma ] ",
  612. "leng", "ntw", "timestamp", "bi->skb");
  613. for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
  614. tx_desc = IXGBE_TX_DESC(tx_ring, i);
  615. tx_buffer = &tx_ring->tx_buffer_info[i];
  616. u0 = (struct my_u0 *)tx_desc;
  617. if (dma_unmap_len(tx_buffer, len) > 0) {
  618. pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
  619. i,
  620. le64_to_cpu(u0->a),
  621. le64_to_cpu(u0->b),
  622. (u64)dma_unmap_addr(tx_buffer, dma),
  623. dma_unmap_len(tx_buffer, len),
  624. tx_buffer->next_to_watch,
  625. (u64)tx_buffer->time_stamp,
  626. tx_buffer->skb);
  627. if (i == tx_ring->next_to_use &&
  628. i == tx_ring->next_to_clean)
  629. pr_cont(" NTC/U\n");
  630. else if (i == tx_ring->next_to_use)
  631. pr_cont(" NTU\n");
  632. else if (i == tx_ring->next_to_clean)
  633. pr_cont(" NTC\n");
  634. else
  635. pr_cont("\n");
  636. if (netif_msg_pktdata(adapter) &&
  637. tx_buffer->skb)
  638. print_hex_dump(KERN_INFO, "",
  639. DUMP_PREFIX_ADDRESS, 16, 1,
  640. tx_buffer->skb->data,
  641. dma_unmap_len(tx_buffer, len),
  642. true);
  643. }
  644. }
  645. }
  646. /* Print RX Rings Summary */
  647. rx_ring_summary:
  648. dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
  649. pr_info("Queue [NTU] [NTC]\n");
  650. for (n = 0; n < adapter->num_rx_queues; n++) {
  651. rx_ring = adapter->rx_ring[n];
  652. pr_info("%5d %5X %5X\n",
  653. n, rx_ring->next_to_use, rx_ring->next_to_clean);
  654. }
  655. /* Print RX Rings */
  656. if (!netif_msg_rx_status(adapter))
  657. return;
  658. dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
        /* Receive Descriptor Formats
         *
         * 82598 Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82598 Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 16 15   4 3     0
         *   +------------------------------------------------------+
         * 0 |       RSS Hash /  |SPH| HDR_LEN  | RSV |Packet|  RSS  |
         *   | Packet   | IP     |   |          |     | Type | Type  |
         *   | Checksum | Ident  |   |          |     |      |       |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length | Extended Error | Extended Status  |
         *   +------------------------------------------------------+
         *   63       48 47    32 31            20 19               0
         *
         * 82599+ Advanced Receive Descriptor (Read) Format
         *    63                                           1        0
         *    +-----------------------------------------------------+
         *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
         *    +----------------------------------------------+------+
         *  8 |       Header Buffer Address [63:1]           |  DD  |
         *    +-----------------------------------------------------+
         *
         *
         * 82599+ Advanced Receive Descriptor (Write-Back) Format
         *
         *   63       48 47    32 31  30      21 20 17 16   4 3     0
         *   +------------------------------------------------------+
         * 0 |RSS / Frag Checksum|SPH| HDR_LEN  |RSC- |Packet| RSS   |
         *   |/ RTT / PCoE_PARAM |   |          | CNT | Type | Type  |
         *   |/ Flow Dir Flt ID  |   |          |     |      |       |
         *   +------------------------------------------------------+
         * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP  |
         *   +------------------------------------------------------+
         *   63       48 47    32 31          20 19                 0
         */
  703. for (n = 0; n < adapter->num_rx_queues; n++) {
  704. rx_ring = adapter->rx_ring[n];
  705. pr_info("------------------------------------\n");
  706. pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
  707. pr_info("------------------------------------\n");
  708. pr_info("%s%s%s",
  709. "R [desc] [ PktBuf A0] ",
  710. "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
  711. "<-- Adv Rx Read format\n");
  712. pr_info("%s%s%s",
  713. "RWB[desc] [PcsmIpSHl PtRs] ",
  714. "[vl er S cks ln] ---------------- [bi->skb ] ",
  715. "<-- Adv Rx Write-Back format\n");
  716. for (i = 0; i < rx_ring->count; i++) {
  717. rx_buffer_info = &rx_ring->rx_buffer_info[i];
  718. rx_desc = IXGBE_RX_DESC(rx_ring, i);
  719. u0 = (struct my_u0 *)rx_desc;
  720. staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
  721. if (staterr & IXGBE_RXD_STAT_DD) {
  722. /* Descriptor Done */
  723. pr_info("RWB[0x%03X] %016llX "
  724. "%016llX ---------------- %p", i,
  725. le64_to_cpu(u0->a),
  726. le64_to_cpu(u0->b),
  727. rx_buffer_info->skb);
  728. } else {
  729. pr_info("R [0x%03X] %016llX "
  730. "%016llX %016llX %p", i,
  731. le64_to_cpu(u0->a),
  732. le64_to_cpu(u0->b),
  733. (u64)rx_buffer_info->dma,
  734. rx_buffer_info->skb);
  735. if (netif_msg_pktdata(adapter) &&
  736. rx_buffer_info->dma) {
  737. print_hex_dump(KERN_INFO, "",
  738. DUMP_PREFIX_ADDRESS, 16, 1,
  739. page_address(rx_buffer_info->page) +
  740. rx_buffer_info->page_offset,
  741. ixgbe_rx_bufsz(rx_ring), true);
  742. }
  743. }
  744. if (i == rx_ring->next_to_use)
  745. pr_cont(" NTU\n");
  746. else if (i == rx_ring->next_to_clean)
  747. pr_cont(" NTC\n");
  748. else
  749. pr_cont("\n");
  750. }
  751. }
  752. }
  753. static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
  754. {
  755. u32 ctrl_ext;
  756. /* Let firmware take over control of h/w */
  757. ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
  758. IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
  759. ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
  760. }
  761. static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
  762. {
  763. u32 ctrl_ext;
  764. /* Let firmware know the driver has taken over */
  765. ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
  766. IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
  767. ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
  768. }
  769. /**
  770. * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
  771. * @adapter: pointer to adapter struct
  772. * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  773. * @queue: queue to map the corresponding interrupt to
  774. * @msix_vector: the vector to map to the corresponding queue
  775. *
  776. */
  777. static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
  778. u8 queue, u8 msix_vector)
  779. {
  780. u32 ivar, index;
  781. struct ixgbe_hw *hw = &adapter->hw;
  782. switch (hw->mac.type) {
  783. case ixgbe_mac_82598EB:
  784. msix_vector |= IXGBE_IVAR_ALLOC_VAL;
  785. if (direction == -1)
  786. direction = 0;
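                /* 82598 packs four 8-bit IVAR entries per register, with Rx
                 * causes in entries 0-63 and Tx causes in entries 64-127;
                 * the low two bits of the queue select the byte within the
                 * register
                 */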
  787. index = (((direction * 64) + queue) >> 2) & 0x1F;
  788. ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
  789. ivar &= ~(0xFF << (8 * (queue & 0x3)));
  790. ivar |= (msix_vector << (8 * (queue & 0x3)));
  791. IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
  792. break;
  793. case ixgbe_mac_82599EB:
  794. case ixgbe_mac_X540:
  795. case ixgbe_mac_X550:
  796. case ixgbe_mac_X550EM_x:
  797. case ixgbe_mac_x550em_a:
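                /* 82599 and newer: "other" causes map through IVAR_MISC,
                 * while each IVAR register packs the Rx and Tx entries for
                 * a pair of queues; queue >> 1 picks the register and the
                 * queue parity plus direction pick one of its byte lanes
                 */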
  798. if (direction == -1) {
  799. /* other causes */
  800. msix_vector |= IXGBE_IVAR_ALLOC_VAL;
  801. index = ((queue & 1) * 8);
  802. ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
  803. ivar &= ~(0xFF << index);
  804. ivar |= (msix_vector << index);
  805. IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
  806. break;
  807. } else {
  808. /* tx or rx causes */
  809. msix_vector |= IXGBE_IVAR_ALLOC_VAL;
  810. index = ((16 * (queue & 1)) + (8 * direction));
  811. ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
  812. ivar &= ~(0xFF << index);
  813. ivar |= (msix_vector << index);
  814. IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
  815. break;
  816. }
  817. default:
  818. break;
  819. }
  820. }
  821. static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
  822. u64 qmask)
  823. {
  824. u32 mask;
  825. switch (adapter->hw.mac.type) {
  826. case ixgbe_mac_82598EB:
  827. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  828. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
  829. break;
  830. case ixgbe_mac_82599EB:
  831. case ixgbe_mac_X540:
  832. case ixgbe_mac_X550:
  833. case ixgbe_mac_X550EM_x:
  834. case ixgbe_mac_x550em_a:
  835. mask = (qmask & 0xFFFFFFFF);
  836. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
  837. mask = (qmask >> 32);
  838. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
  839. break;
  840. default:
  841. break;
  842. }
  843. }
  844. void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
  845. struct ixgbe_tx_buffer *tx_buffer)
  846. {
  847. if (tx_buffer->skb) {
  848. dev_kfree_skb_any(tx_buffer->skb);
  849. if (dma_unmap_len(tx_buffer, len))
  850. dma_unmap_single(ring->dev,
  851. dma_unmap_addr(tx_buffer, dma),
  852. dma_unmap_len(tx_buffer, len),
  853. DMA_TO_DEVICE);
  854. } else if (dma_unmap_len(tx_buffer, len)) {
  855. dma_unmap_page(ring->dev,
  856. dma_unmap_addr(tx_buffer, dma),
  857. dma_unmap_len(tx_buffer, len),
  858. DMA_TO_DEVICE);
  859. }
  860. tx_buffer->next_to_watch = NULL;
  861. tx_buffer->skb = NULL;
  862. dma_unmap_len_set(tx_buffer, len, 0);
  863. /* tx_buffer must be completely set up in the transmit path */
  864. }
  865. static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
  866. {
  867. struct ixgbe_hw *hw = &adapter->hw;
  868. struct ixgbe_hw_stats *hwstats = &adapter->stats;
  869. int i;
  870. u32 data;
  871. if ((hw->fc.current_mode != ixgbe_fc_full) &&
  872. (hw->fc.current_mode != ixgbe_fc_rx_pause))
  873. return;
  874. switch (hw->mac.type) {
  875. case ixgbe_mac_82598EB:
  876. data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
  877. break;
  878. default:
  879. data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
  880. }
  881. hwstats->lxoffrxc += data;
  882. /* refill credits (no tx hang) if we received xoff */
  883. if (!data)
  884. return;
  885. for (i = 0; i < adapter->num_tx_queues; i++)
  886. clear_bit(__IXGBE_HANG_CHECK_ARMED,
  887. &adapter->tx_ring[i]->state);
  888. }
  889. static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
  890. {
  891. struct ixgbe_hw *hw = &adapter->hw;
  892. struct ixgbe_hw_stats *hwstats = &adapter->stats;
  893. u32 xoff[8] = {0};
  894. u8 tc;
  895. int i;
  896. bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
  897. if (adapter->ixgbe_ieee_pfc)
  898. pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
  899. if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
  900. ixgbe_update_xoff_rx_lfc(adapter);
  901. return;
  902. }
  903. /* update stats for each tc, only valid with PFC enabled */
  904. for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
  905. u32 pxoffrxc;
  906. switch (hw->mac.type) {
  907. case ixgbe_mac_82598EB:
  908. pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
  909. break;
  910. default:
  911. pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
  912. }
  913. hwstats->pxoffrxc[i] += pxoffrxc;
  914. /* Get the TC for given UP */
  915. tc = netdev_get_prio_tc_map(adapter->netdev, i);
  916. xoff[tc] += pxoffrxc;
  917. }
  918. /* disarm tx queues that have received xoff frames */
  919. for (i = 0; i < adapter->num_tx_queues; i++) {
  920. struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
  921. tc = tx_ring->dcb_tc;
  922. if (xoff[tc])
  923. clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
  924. }
  925. }
  926. static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
  927. {
  928. return ring->stats.packets;
  929. }
  930. static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
  931. {
  932. struct ixgbe_adapter *adapter;
  933. struct ixgbe_hw *hw;
  934. u32 head, tail;
  935. if (ring->l2_accel_priv)
  936. adapter = ring->l2_accel_priv->real_adapter;
  937. else
  938. adapter = netdev_priv(ring->netdev);
  939. hw = &adapter->hw;
  940. head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
  941. tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
  942. if (head != tail)
  943. return (head < tail) ?
  944. tail - head : (tail + ring->count - head);
  945. return 0;
  946. }
  947. static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
  948. {
  949. u32 tx_done = ixgbe_get_tx_completed(tx_ring);
  950. u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
  951. u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
  952. clear_check_for_tx_hang(tx_ring);
  953. /*
  954. * Check for a hung queue, but be thorough. This verifies
  955. * that a transmit has been completed since the previous
  956. * check AND there is at least one packet pending. The
  957. * ARMED bit is set to indicate a potential hang. The
  958. * bit is cleared if a pause frame is received to remove
  959. * false hang detection due to PFC or 802.3x frames. By
  960. * requiring this to fail twice we avoid races with
  961. * pfc clearing the ARMED bit and conditions where we
  962. * run the check_tx_hang logic with a transmit completion
  963. * pending but without time to complete it yet.
  964. */
  965. if (tx_done_old == tx_done && tx_pending)
  966. /* make sure it is true for two checks in a row */
  967. return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
  968. &tx_ring->state);
  969. /* update completed stats and continue */
  970. tx_ring->tx_stats.tx_done_old = tx_done;
  971. /* reset the countdown */
  972. clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
  973. return false;
  974. }
  975. /**
  976. * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
  977. * @adapter: driver private struct
  978. **/
  979. static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
  980. {
  981. /* Do the reset outside of interrupt context */
  982. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  983. set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  984. e_warn(drv, "initiating reset due to tx timeout\n");
  985. ixgbe_service_event_schedule(adapter);
  986. }
  987. }
/**
 * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
 * @netdev: network interface device structure
 * @queue_index: Tx queue to configure
 * @maxrate: desired maximum transmit bitrate, in Mbps
 **/
  991. static int ixgbe_tx_maxrate(struct net_device *netdev,
  992. int queue_index, u32 maxrate)
  993. {
  994. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  995. struct ixgbe_hw *hw = &adapter->hw;
  996. u32 bcnrc_val = ixgbe_link_mbps(adapter);
  997. if (!maxrate)
  998. return 0;
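        /* The rate factor written to RTTBCNRC is the ratio of link speed to
         * the requested rate in fixed point: shifting the link speed (in
         * Mbps) left by RF_INT_SHIFT before dividing preserves the
         * fractional bits of link_mbps / maxrate.
         */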
  999. /* Calculate the rate factor values to set */
  1000. bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
  1001. bcnrc_val /= maxrate;
  1002. /* clear everything but the rate factor */
  1003. bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
  1004. IXGBE_RTTBCNRC_RF_DEC_MASK;
  1005. /* enable the rate scheduler */
  1006. bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
  1007. IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
  1008. IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
  1009. return 0;
  1010. }
  1011. /**
  1012. * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  1013. * @q_vector: structure containing interrupt and ring information
  1014. * @tx_ring: tx ring to clean
  1015. * @napi_budget: Used to determine if we are in netpoll
  1016. **/
  1017. static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
  1018. struct ixgbe_ring *tx_ring, int napi_budget)
  1019. {
  1020. struct ixgbe_adapter *adapter = q_vector->adapter;
  1021. struct ixgbe_tx_buffer *tx_buffer;
  1022. union ixgbe_adv_tx_desc *tx_desc;
  1023. unsigned int total_bytes = 0, total_packets = 0;
  1024. unsigned int budget = q_vector->tx.work_limit;
  1025. unsigned int i = tx_ring->next_to_clean;
  1026. if (test_bit(__IXGBE_DOWN, &adapter->state))
  1027. return true;
  1028. tx_buffer = &tx_ring->tx_buffer_info[i];
  1029. tx_desc = IXGBE_TX_DESC(tx_ring, i);
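        /* bias the index by the ring size so wrapping can be detected with
         * a cheap zero test instead of comparing against tx_ring->count
         */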
  1030. i -= tx_ring->count;
  1031. do {
  1032. union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
  1033. /* if next_to_watch is not set then there is no work pending */
  1034. if (!eop_desc)
  1035. break;
  1036. /* prevent any other reads prior to eop_desc */
  1037. smp_rmb();
  1038. /* if DD is not set pending work has not been completed */
  1039. if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
  1040. break;
  1041. /* clear next_to_watch to prevent false hangs */
  1042. tx_buffer->next_to_watch = NULL;
  1043. /* update the statistics for this packet */
  1044. total_bytes += tx_buffer->bytecount;
  1045. total_packets += tx_buffer->gso_segs;
  1046. /* free the skb */
  1047. napi_consume_skb(tx_buffer->skb, napi_budget);
  1048. /* unmap skb header data */
  1049. dma_unmap_single(tx_ring->dev,
  1050. dma_unmap_addr(tx_buffer, dma),
  1051. dma_unmap_len(tx_buffer, len),
  1052. DMA_TO_DEVICE);
  1053. /* clear tx_buffer data */
  1054. tx_buffer->skb = NULL;
  1055. dma_unmap_len_set(tx_buffer, len, 0);
  1056. /* unmap remaining buffers */
  1057. while (tx_desc != eop_desc) {
  1058. tx_buffer++;
  1059. tx_desc++;
  1060. i++;
  1061. if (unlikely(!i)) {
  1062. i -= tx_ring->count;
  1063. tx_buffer = tx_ring->tx_buffer_info;
  1064. tx_desc = IXGBE_TX_DESC(tx_ring, 0);
  1065. }
  1066. /* unmap any remaining paged data */
  1067. if (dma_unmap_len(tx_buffer, len)) {
  1068. dma_unmap_page(tx_ring->dev,
  1069. dma_unmap_addr(tx_buffer, dma),
  1070. dma_unmap_len(tx_buffer, len),
  1071. DMA_TO_DEVICE);
  1072. dma_unmap_len_set(tx_buffer, len, 0);
  1073. }
  1074. }
  1075. /* move us one more past the eop_desc for start of next pkt */
  1076. tx_buffer++;
  1077. tx_desc++;
  1078. i++;
  1079. if (unlikely(!i)) {
  1080. i -= tx_ring->count;
  1081. tx_buffer = tx_ring->tx_buffer_info;
  1082. tx_desc = IXGBE_TX_DESC(tx_ring, 0);
  1083. }
  1084. /* issue prefetch for next Tx descriptor */
  1085. prefetch(tx_desc);
  1086. /* update budget accounting */
  1087. budget--;
  1088. } while (likely(budget));
  1089. i += tx_ring->count;
  1090. tx_ring->next_to_clean = i;
  1091. u64_stats_update_begin(&tx_ring->syncp);
  1092. tx_ring->stats.bytes += total_bytes;
  1093. tx_ring->stats.packets += total_packets;
  1094. u64_stats_update_end(&tx_ring->syncp);
  1095. q_vector->tx.total_bytes += total_bytes;
  1096. q_vector->tx.total_packets += total_packets;
  1097. if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
  1098. /* schedule immediate reset if we believe we hung */
  1099. struct ixgbe_hw *hw = &adapter->hw;
  1100. e_err(drv, "Detected Tx Unit Hang\n"
  1101. " Tx Queue <%d>\n"
  1102. " TDH, TDT <%x>, <%x>\n"
  1103. " next_to_use <%x>\n"
  1104. " next_to_clean <%x>\n"
  1105. "tx_buffer_info[next_to_clean]\n"
  1106. " time_stamp <%lx>\n"
  1107. " jiffies <%lx>\n",
  1108. tx_ring->queue_index,
  1109. IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
  1110. IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
  1111. tx_ring->next_to_use, i,
  1112. tx_ring->tx_buffer_info[i].time_stamp, jiffies);
  1113. netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
  1114. e_info(probe,
  1115. "tx hang %d detected on queue %d, resetting adapter\n",
  1116. adapter->tx_timeout_count + 1, tx_ring->queue_index);
  1117. /* schedule immediate reset if we believe we hung */
  1118. ixgbe_tx_timeout_reset(adapter);
  1119. /* the adapter is about to reset, no point in enabling stuff */
  1120. return true;
  1121. }
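        /* report completed bytes/packets to the stack for BQL accounting */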
  1122. netdev_tx_completed_queue(txring_txq(tx_ring),
  1123. total_packets, total_bytes);
  1124. #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
  1125. if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
  1126. (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
  1127. /* Make sure that anybody stopping the queue after this
  1128. * sees the new next_to_clean.
  1129. */
  1130. smp_mb();
  1131. if (__netif_subqueue_stopped(tx_ring->netdev,
  1132. tx_ring->queue_index)
  1133. && !test_bit(__IXGBE_DOWN, &adapter->state)) {
  1134. netif_wake_subqueue(tx_ring->netdev,
  1135. tx_ring->queue_index);
  1136. ++tx_ring->tx_stats.restart_queue;
  1137. }
  1138. }
  1139. return !!budget;
  1140. }
  1141. #ifdef CONFIG_IXGBE_DCA
  1142. static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
  1143. struct ixgbe_ring *tx_ring,
  1144. int cpu)
  1145. {
  1146. struct ixgbe_hw *hw = &adapter->hw;
  1147. u32 txctrl = 0;
  1148. u16 reg_offset;
  1149. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1150. txctrl = dca3_get_tag(tx_ring->dev, cpu);
  1151. switch (hw->mac.type) {
  1152. case ixgbe_mac_82598EB:
  1153. reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
  1154. break;
  1155. case ixgbe_mac_82599EB:
  1156. case ixgbe_mac_X540:
  1157. reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
  1158. txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
  1159. break;
  1160. default:
  1161. /* for unknown hardware do not write register */
  1162. return;
  1163. }
  1164. /*
  1165. * We can enable relaxed ordering for reads, but not writes when
  1166. * DCA is enabled. This is due to a known issue in some chipsets
  1167. * which will cause the DCA tag to be cleared.
  1168. */
  1169. txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
  1170. IXGBE_DCA_TXCTRL_DATA_RRO_EN |
  1171. IXGBE_DCA_TXCTRL_DESC_DCA_EN;
  1172. IXGBE_WRITE_REG(hw, reg_offset, txctrl);
  1173. }
  1174. static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
  1175. struct ixgbe_ring *rx_ring,
  1176. int cpu)
  1177. {
  1178. struct ixgbe_hw *hw = &adapter->hw;
  1179. u32 rxctrl = 0;
  1180. u8 reg_idx = rx_ring->reg_idx;
  1181. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1182. rxctrl = dca3_get_tag(rx_ring->dev, cpu);
  1183. switch (hw->mac.type) {
  1184. case ixgbe_mac_82599EB:
  1185. case ixgbe_mac_X540:
  1186. rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
  1187. break;
  1188. default:
  1189. break;
  1190. }
  1191. /*
  1192. * We can enable relaxed ordering for reads, but not writes when
  1193. * DCA is enabled. This is due to a known issue in some chipsets
  1194. * which will cause the DCA tag to be cleared.
  1195. */
  1196. rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
  1197. IXGBE_DCA_RXCTRL_DATA_DCA_EN |
  1198. IXGBE_DCA_RXCTRL_DESC_DCA_EN;
  1199. IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
  1200. }
  1201. static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
  1202. {
  1203. struct ixgbe_adapter *adapter = q_vector->adapter;
  1204. struct ixgbe_ring *ring;
  1205. int cpu = get_cpu();
  1206. if (q_vector->cpu == cpu)
  1207. goto out_no_update;
  1208. ixgbe_for_each_ring(ring, q_vector->tx)
  1209. ixgbe_update_tx_dca(adapter, ring, cpu);
  1210. ixgbe_for_each_ring(ring, q_vector->rx)
  1211. ixgbe_update_rx_dca(adapter, ring, cpu);
  1212. q_vector->cpu = cpu;
  1213. out_no_update:
  1214. put_cpu();
  1215. }
  1216. static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
  1217. {
  1218. int i;
  1219. /* always use CB2 mode, difference is masked in the CB driver */
  1220. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1221. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1222. IXGBE_DCA_CTRL_DCA_MODE_CB2);
  1223. else
  1224. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1225. IXGBE_DCA_CTRL_DCA_DISABLE);
  1226. for (i = 0; i < adapter->num_q_vectors; i++) {
  1227. adapter->q_vector[i]->cpu = -1;
  1228. ixgbe_update_dca(adapter->q_vector[i]);
  1229. }
  1230. }
  1231. static int __ixgbe_notify_dca(struct device *dev, void *data)
  1232. {
  1233. struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
  1234. unsigned long event = *(unsigned long *)data;
  1235. if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
  1236. return 0;
  1237. switch (event) {
  1238. case DCA_PROVIDER_ADD:
  1239. /* if we're already enabled, don't do it again */
  1240. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  1241. break;
  1242. if (dca_add_requester(dev) == 0) {
  1243. adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
  1244. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1245. IXGBE_DCA_CTRL_DCA_MODE_CB2);
  1246. break;
  1247. }
  1248. /* Fall Through since DCA is disabled. */
  1249. case DCA_PROVIDER_REMOVE:
  1250. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
  1251. dca_remove_requester(dev);
  1252. adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
  1253. IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
  1254. IXGBE_DCA_CTRL_DCA_DISABLE);
  1255. }
  1256. break;
  1257. }
  1258. return 0;
  1259. }
  1260. #endif /* CONFIG_IXGBE_DCA */
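/* RSS hash types computed over an L4 header; any other reported type only
 * covers L3, which decides the skb hash type set in ixgbe_rx_hash()
 */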
  1261. #define IXGBE_RSS_L4_TYPES_MASK \
  1262. ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
  1263. (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
  1264. (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
  1265. (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
  1266. static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
  1267. union ixgbe_adv_rx_desc *rx_desc,
  1268. struct sk_buff *skb)
  1269. {
  1270. u16 rss_type;
  1271. if (!(ring->netdev->features & NETIF_F_RXHASH))
  1272. return;
  1273. rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
  1274. IXGBE_RXDADV_RSSTYPE_MASK;
  1275. if (!rss_type)
  1276. return;
  1277. skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
  1278. (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
  1279. PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
  1280. }
  1281. #ifdef IXGBE_FCOE
  1282. /**
  1283. * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
  1284. * @ring: structure containing ring specific data
  1285. * @rx_desc: advanced rx descriptor
  1286. *
  1287. * Returns : true if it is FCoE pkt
  1288. */
  1289. static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
  1290. union ixgbe_adv_rx_desc *rx_desc)
  1291. {
  1292. __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
  1293. return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
  1294. ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
  1295. (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
  1296. IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
  1297. }
  1298. #endif /* IXGBE_FCOE */
  1299. /**
  1300. * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
  1301. * @ring: structure containing ring specific data
  1302. * @rx_desc: current Rx descriptor being processed
  1303. * @skb: skb currently being received and modified
  1304. **/
  1305. static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
  1306. union ixgbe_adv_rx_desc *rx_desc,
  1307. struct sk_buff *skb)
  1308. {
  1309. __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
  1310. bool encap_pkt = false;
  1311. skb_checksum_none_assert(skb);
  1312. /* Rx csum disabled */
  1313. if (!(ring->netdev->features & NETIF_F_RXCSUM))
  1314. return;
  1315. /* check for VXLAN and Geneve packets */
  1316. if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) {
  1317. encap_pkt = true;
  1318. skb->encapsulation = 1;
  1319. }
  1320. /* if IP and error */
  1321. if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
  1322. ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
  1323. ring->rx_stats.csum_err++;
  1324. return;
  1325. }
  1326. if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
  1327. return;
  1328. if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
  1329. /*
  1330. * 82599 errata, UDP frames with a 0 checksum can be marked as
  1331. * checksum errors.
  1332. */
  1333. if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_UDP)) &&
  1334. test_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state))
  1335. return;
  1336. ring->rx_stats.csum_err++;
  1337. return;
  1338. }
  1339. /* It must be a TCP or UDP packet with a valid checksum */
  1340. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1341. if (encap_pkt) {
  1342. if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS))
  1343. return;
  1344. if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
  1345. skb->ip_summed = CHECKSUM_NONE;
  1346. return;
  1347. }
  1348. /* If we checked the outer header let the stack know */
  1349. skb->csum_level = 1;
  1350. }
  1351. }
  1352. static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
  1353. struct ixgbe_rx_buffer *bi)
  1354. {
  1355. struct page *page = bi->page;
  1356. dma_addr_t dma;
  1357. /* since we are recycling buffers we should seldom need to alloc */
  1358. if (likely(page))
  1359. return true;
  1360. /* alloc new page for storage */
  1361. page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
  1362. if (unlikely(!page)) {
  1363. rx_ring->rx_stats.alloc_rx_page_failed++;
  1364. return false;
  1365. }
  1366. /* map page for use */
  1367. dma = dma_map_page(rx_ring->dev, page, 0,
  1368. ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
  1369. /*
  1370. * if mapping failed free memory back to system since
  1371. * there isn't much point in holding memory we can't use
  1372. */
  1373. if (dma_mapping_error(rx_ring->dev, dma)) {
  1374. __free_pages(page, ixgbe_rx_pg_order(rx_ring));
  1375. rx_ring->rx_stats.alloc_rx_page_failed++;
  1376. return false;
  1377. }
  1378. bi->dma = dma;
  1379. bi->page = page;
  1380. bi->page_offset = 0;
  1381. return true;
  1382. }
  1383. /**
  1384. * ixgbe_alloc_rx_buffers - Replace used receive buffers
  1385. * @rx_ring: ring to place buffers on
  1386. * @cleaned_count: number of buffers to replace
  1387. **/
  1388. void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
  1389. {
  1390. union ixgbe_adv_rx_desc *rx_desc;
  1391. struct ixgbe_rx_buffer *bi;
  1392. u16 i = rx_ring->next_to_use;
  1393. /* nothing to do */
  1394. if (!cleaned_count)
  1395. return;
  1396. rx_desc = IXGBE_RX_DESC(rx_ring, i);
  1397. bi = &rx_ring->rx_buffer_info[i];
  1398. i -= rx_ring->count;
  1399. do {
  1400. if (!ixgbe_alloc_mapped_page(rx_ring, bi))
  1401. break;
  1402. /*
  1403. * Refresh the desc even if buffer_addrs didn't change
  1404. * because each write-back erases this info.
  1405. */
  1406. rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
  1407. rx_desc++;
  1408. bi++;
  1409. i++;
  1410. if (unlikely(!i)) {
  1411. rx_desc = IXGBE_RX_DESC(rx_ring, 0);
  1412. bi = rx_ring->rx_buffer_info;
  1413. i -= rx_ring->count;
  1414. }
  1415. /* clear the status bits for the next_to_use descriptor */
  1416. rx_desc->wb.upper.status_error = 0;
  1417. cleaned_count--;
  1418. } while (cleaned_count);
  1419. i += rx_ring->count;
  1420. if (rx_ring->next_to_use != i) {
  1421. rx_ring->next_to_use = i;
  1422. /* update next to alloc since we have filled the ring */
  1423. rx_ring->next_to_alloc = i;
  1424. /* Force memory writes to complete before letting h/w
  1425. * know there are new descriptors to fetch. (Only
  1426. * applicable for weak-ordered memory model archs,
  1427. * such as IA-64).
  1428. */
  1429. wmb();
  1430. writel(i, rx_ring->tail);
  1431. }
  1432. }
  1433. static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring,
  1434. struct sk_buff *skb)
  1435. {
  1436. u16 hdr_len = skb_headlen(skb);
  1437. /* set gso_size to avoid messing up TCP MSS */
  1438. skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len),
  1439. IXGBE_CB(skb)->append_cnt);
  1440. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  1441. }
  1442. static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring,
  1443. struct sk_buff *skb)
  1444. {
  1445. /* if append_cnt is 0 then frame is not RSC */
  1446. if (!IXGBE_CB(skb)->append_cnt)
  1447. return;
  1448. rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt;
  1449. rx_ring->rx_stats.rsc_flush++;
  1450. ixgbe_set_rsc_gso_size(rx_ring, skb);
  1451. /* gso_size is computed using append_cnt so always clear it last */
  1452. IXGBE_CB(skb)->append_cnt = 0;
  1453. }
  1454. /**
  1455. * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor
  1456. * @rx_ring: rx descriptor ring packet is being transacted on
  1457. * @rx_desc: pointer to the EOP Rx descriptor
  1458. * @skb: pointer to current skb being populated
  1459. *
  1460. * This function checks the ring, descriptor, and packet information in
  1461. * order to populate the hash, checksum, VLAN, timestamp, protocol, and
  1462. * other fields within the skb.
  1463. **/
  1464. static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
  1465. union ixgbe_adv_rx_desc *rx_desc,
  1466. struct sk_buff *skb)
  1467. {
  1468. struct net_device *dev = rx_ring->netdev;
  1469. u32 flags = rx_ring->q_vector->adapter->flags;
  1470. ixgbe_update_rsc_stats(rx_ring, skb);
  1471. ixgbe_rx_hash(rx_ring, rx_desc, skb);
  1472. ixgbe_rx_checksum(rx_ring, rx_desc, skb);
  1473. if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
  1474. ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
  1475. if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
  1476. ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
  1477. u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
  1478. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
  1479. }
  1480. skb_record_rx_queue(skb, rx_ring->queue_index);
  1481. skb->protocol = eth_type_trans(skb, dev);
  1482. }
  1483. static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
  1484. struct sk_buff *skb)
  1485. {
  1486. skb_mark_napi_id(skb, &q_vector->napi);
  1487. if (ixgbe_qv_busy_polling(q_vector))
  1488. netif_receive_skb(skb);
  1489. else
  1490. napi_gro_receive(&q_vector->napi, skb);
  1491. }
  1492. /**
  1493. * ixgbe_is_non_eop - process handling of non-EOP buffers
  1494. * @rx_ring: Rx ring being processed
  1495. * @rx_desc: Rx descriptor for current buffer
  1496. * @skb: Current socket buffer containing buffer in progress
  1497. *
  1498. * This function updates next to clean. If the buffer is an EOP buffer
  1499. * this function exits returning false, otherwise it will place the
  1500. * sk_buff in the next buffer to be chained and return true indicating
  1501. * that this is in fact a non-EOP buffer.
  1502. **/
  1503. static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
  1504. union ixgbe_adv_rx_desc *rx_desc,
  1505. struct sk_buff *skb)
  1506. {
  1507. u32 ntc = rx_ring->next_to_clean + 1;
  1508. /* fetch, update, and store next to clean */
  1509. ntc = (ntc < rx_ring->count) ? ntc : 0;
  1510. rx_ring->next_to_clean = ntc;
  1511. prefetch(IXGBE_RX_DESC(rx_ring, ntc));
  1512. /* update RSC append count if present */
  1513. if (ring_is_rsc_enabled(rx_ring)) {
  1514. __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data &
  1515. cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK);
  1516. if (unlikely(rsc_enabled)) {
  1517. u32 rsc_cnt = le32_to_cpu(rsc_enabled);
  1518. rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT;
  1519. IXGBE_CB(skb)->append_cnt += rsc_cnt - 1;
  1520. /* update ntc based on RSC value */
  1521. ntc = le32_to_cpu(rx_desc->wb.upper.status_error);
  1522. ntc &= IXGBE_RXDADV_NEXTP_MASK;
  1523. ntc >>= IXGBE_RXDADV_NEXTP_SHIFT;
  1524. }
  1525. }
  1526. /* if we are the last buffer then there is nothing else to do */
  1527. if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
  1528. return false;
  1529. /* place skb in next buffer to be received */
  1530. rx_ring->rx_buffer_info[ntc].skb = skb;
  1531. rx_ring->rx_stats.non_eop_descs++;
  1532. return true;
  1533. }
  1534. /**
  1535. * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail
  1536. * @rx_ring: rx descriptor ring packet is being transacted on
  1537. * @skb: pointer to current skb being adjusted
  1538. *
  1539. * This function is an ixgbe specific version of __pskb_pull_tail. The
  1540. * main difference between this version and the original function is that
  1541. * this function can make several assumptions about the state of things
  1542. * that allow for significant optimizations versus the standard function.
  1543. * As a result we can do things like drop a frag and maintain an accurate
  1544. * truesize for the skb.
  1545. */
  1546. static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
  1547. struct sk_buff *skb)
  1548. {
  1549. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
  1550. unsigned char *va;
  1551. unsigned int pull_len;
  1552. /*
  1553. * it is valid to use page_address instead of kmap since we are
         * working with pages allocated out of the lowmem pool per
  1555. * alloc_page(GFP_ATOMIC)
  1556. */
  1557. va = skb_frag_address(frag);
  1558. /*
  1559. * we need the header to contain the greater of either ETH_HLEN or
  1560. * 60 bytes if the skb->len is less than 60 for skb_pad.
  1561. */
  1562. pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
  1563. /* align pull length to size of long to optimize memcpy performance */
  1564. skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
  1565. /* update all of the pointers */
  1566. skb_frag_size_sub(frag, pull_len);
  1567. frag->page_offset += pull_len;
  1568. skb->data_len -= pull_len;
  1569. skb->tail += pull_len;
  1570. }
  1571. /**
  1572. * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
  1573. * @rx_ring: rx descriptor ring packet is being transacted on
  1574. * @skb: pointer to current skb being updated
  1575. *
  1576. * This function provides a basic DMA sync up for the first fragment of an
  1577. * skb. The reason for doing this is that the first fragment cannot be
  1578. * unmapped until we have reached the end of packet descriptor for a buffer
  1579. * chain.
  1580. */
  1581. static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
  1582. struct sk_buff *skb)
  1583. {
  1584. /* if the page was released unmap it, else just sync our portion */
  1585. if (unlikely(IXGBE_CB(skb)->page_released)) {
  1586. dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
  1587. ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
  1588. IXGBE_CB(skb)->page_released = false;
  1589. } else {
  1590. struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
  1591. dma_sync_single_range_for_cpu(rx_ring->dev,
  1592. IXGBE_CB(skb)->dma,
  1593. frag->page_offset,
  1594. ixgbe_rx_bufsz(rx_ring),
  1595. DMA_FROM_DEVICE);
  1596. }
  1597. IXGBE_CB(skb)->dma = 0;
  1598. }
  1599. /**
  1600. * ixgbe_cleanup_headers - Correct corrupted or empty headers
  1601. * @rx_ring: rx descriptor ring packet is being transacted on
  1602. * @rx_desc: pointer to the EOP Rx descriptor
  1603. * @skb: pointer to current skb being fixed
  1604. *
  1605. * Check for corrupted packet headers caused by senders on the local L2
  1606. * embedded NIC switch not setting up their Tx Descriptors right. These
  1607. * should be very rare.
  1608. *
  1609. * Also address the case where we are pulling data in on pages only
  1610. * and as such no data is present in the skb header.
  1611. *
  1612. * In addition if skb is not at least 60 bytes we need to pad it so that
  1613. * it is large enough to qualify as a valid Ethernet frame.
  1614. *
  1615. * Returns true if an error was encountered and skb was freed.
  1616. **/
  1617. static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
  1618. union ixgbe_adv_rx_desc *rx_desc,
  1619. struct sk_buff *skb)
  1620. {
  1621. struct net_device *netdev = rx_ring->netdev;
  1622. /* verify that the packet does not have any known errors */
  1623. if (unlikely(ixgbe_test_staterr(rx_desc,
  1624. IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
  1625. !(netdev->features & NETIF_F_RXALL))) {
  1626. dev_kfree_skb_any(skb);
  1627. return true;
  1628. }
  1629. /* place header in linear portion of buffer */
  1630. if (skb_is_nonlinear(skb))
  1631. ixgbe_pull_tail(rx_ring, skb);
  1632. #ifdef IXGBE_FCOE
  1633. /* do not attempt to pad FCoE Frames as this will disrupt DDP */
  1634. if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
  1635. return false;
  1636. #endif
  1637. /* if eth_skb_pad returns an error the skb was freed */
  1638. if (eth_skb_pad(skb))
  1639. return true;
  1640. return false;
  1641. }
  1642. /**
  1643. * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring
  1644. * @rx_ring: rx descriptor ring to store buffers on
  1645. * @old_buff: donor buffer to have page reused
  1646. *
  1647. * Synchronizes page for reuse by the adapter
  1648. **/
  1649. static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
  1650. struct ixgbe_rx_buffer *old_buff)
  1651. {
  1652. struct ixgbe_rx_buffer *new_buff;
  1653. u16 nta = rx_ring->next_to_alloc;
  1654. new_buff = &rx_ring->rx_buffer_info[nta];
  1655. /* update, and store next to alloc */
  1656. nta++;
  1657. rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
  1658. /* transfer page from old buffer to new buffer */
  1659. *new_buff = *old_buff;
  1660. /* sync the buffer for use by the device */
  1661. dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
  1662. new_buff->page_offset,
  1663. ixgbe_rx_bufsz(rx_ring),
  1664. DMA_FROM_DEVICE);
  1665. }
  1666. static inline bool ixgbe_page_is_reserved(struct page *page)
  1667. {
  1668. return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
  1669. }
  1670. /**
  1671. * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  1672. * @rx_ring: rx descriptor ring to transact packets on
  1673. * @rx_buffer: buffer containing page to add
  1674. * @rx_desc: descriptor containing length of buffer written by hardware
  1675. * @skb: sk_buff to place the data into
  1676. *
  1677. * This function will add the data contained in rx_buffer->page to the skb.
  1678. * This is done either through a direct copy if the data in the buffer is
  1679. * less than the skb header size, otherwise it will just attach the page as
  1680. * a frag to the skb.
  1681. *
  1682. * The function will then update the page offset if necessary and return
  1683. * true if the buffer can be reused by the adapter.
  1684. **/
  1685. static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
  1686. struct ixgbe_rx_buffer *rx_buffer,
  1687. union ixgbe_adv_rx_desc *rx_desc,
  1688. struct sk_buff *skb)
  1689. {
  1690. struct page *page = rx_buffer->page;
  1691. unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
  1692. #if (PAGE_SIZE < 8192)
  1693. unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
  1694. #else
  1695. unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
  1696. unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
  1697. ixgbe_rx_bufsz(rx_ring);
  1698. #endif
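        /* small frames that still fit in the skb header are copied out so
         * the receive buffer can be recycled immediately; anything larger
         * is attached to the skb as a page fragment instead
         */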
  1699. if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
  1700. unsigned char *va = page_address(page) + rx_buffer->page_offset;
  1701. memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
  1702. /* page is not reserved, we can reuse buffer as-is */
  1703. if (likely(!ixgbe_page_is_reserved(page)))
  1704. return true;
  1705. /* this page cannot be reused so discard it */
  1706. __free_pages(page, ixgbe_rx_pg_order(rx_ring));
  1707. return false;
  1708. }
  1709. skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
  1710. rx_buffer->page_offset, size, truesize);
  1711. /* avoid re-using remote pages */
  1712. if (unlikely(ixgbe_page_is_reserved(page)))
  1713. return false;
  1714. #if (PAGE_SIZE < 8192)
  1715. /* if we are only owner of page we can reuse it */
  1716. if (unlikely(page_count(page) != 1))
  1717. return false;
  1718. /* flip page offset to other buffer */
  1719. rx_buffer->page_offset ^= truesize;
  1720. #else
  1721. /* move offset up to the next cache line */
  1722. rx_buffer->page_offset += truesize;
  1723. if (rx_buffer->page_offset > last_offset)
  1724. return false;
  1725. #endif
  1726. /* Even if we own the page, we are not allowed to use atomic_set()
  1727. * This would break get_page_unless_zero() users.
  1728. */
  1729. page_ref_inc(page);
  1730. return true;
  1731. }
  1732. static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
  1733. union ixgbe_adv_rx_desc *rx_desc)
  1734. {
  1735. struct ixgbe_rx_buffer *rx_buffer;
  1736. struct sk_buff *skb;
  1737. struct page *page;
  1738. rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
  1739. page = rx_buffer->page;
  1740. prefetchw(page);
  1741. skb = rx_buffer->skb;
  1742. if (likely(!skb)) {
  1743. void *page_addr = page_address(page) +
  1744. rx_buffer->page_offset;
  1745. /* prefetch first cache line of first page */
  1746. prefetch(page_addr);
  1747. #if L1_CACHE_BYTES < 128
  1748. prefetch(page_addr + L1_CACHE_BYTES);
  1749. #endif
  1750. /* allocate a skb to store the frags */
  1751. skb = napi_alloc_skb(&rx_ring->q_vector->napi,
  1752. IXGBE_RX_HDR_SIZE);
  1753. if (unlikely(!skb)) {
  1754. rx_ring->rx_stats.alloc_rx_buff_failed++;
  1755. return NULL;
  1756. }
  1757. /*
  1758. * we will be copying header into skb->data in
  1759. * pskb_may_pull so it is in our interest to prefetch
  1760. * it now to avoid a possible cache miss
  1761. */
  1762. prefetchw(skb->data);
  1763. /*
  1764. * Delay unmapping of the first packet. It carries the
  1765. * header information, HW may still access the header
  1766. * after the writeback. Only unmap it when EOP is
  1767. * reached
  1768. */
  1769. if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
  1770. goto dma_sync;
  1771. IXGBE_CB(skb)->dma = rx_buffer->dma;
  1772. } else {
  1773. if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
  1774. ixgbe_dma_sync_frag(rx_ring, skb);
  1775. dma_sync:
  1776. /* we are reusing so sync this buffer for CPU use */
  1777. dma_sync_single_range_for_cpu(rx_ring->dev,
  1778. rx_buffer->dma,
  1779. rx_buffer->page_offset,
  1780. ixgbe_rx_bufsz(rx_ring),
  1781. DMA_FROM_DEVICE);
  1782. rx_buffer->skb = NULL;
  1783. }
  1784. /* pull page into skb */
  1785. if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
  1786. /* hand second half of page back to the ring */
  1787. ixgbe_reuse_rx_page(rx_ring, rx_buffer);
  1788. } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
  1789. /* the page has been released from the ring */
  1790. IXGBE_CB(skb)->page_released = true;
  1791. } else {
  1792. /* we are not reusing the buffer so unmap it */
  1793. dma_unmap_page(rx_ring->dev, rx_buffer->dma,
  1794. ixgbe_rx_pg_size(rx_ring),
  1795. DMA_FROM_DEVICE);
  1796. }
  1797. /* clear contents of buffer_info */
  1798. rx_buffer->page = NULL;
  1799. return skb;
  1800. }
  1801. /**
  1802. * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  1803. * @q_vector: structure containing interrupt and ring information
  1804. * @rx_ring: rx descriptor ring to transact packets on
  1805. * @budget: Total limit on number of packets to process
  1806. *
  1807. * This function provides a "bounce buffer" approach to Rx interrupt
  1808. * processing. The advantage to this is that on systems that have
  1809. * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
  1811. *
  1812. * Returns amount of work completed
  1813. **/
  1814. static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
  1815. struct ixgbe_ring *rx_ring,
  1816. const int budget)
  1817. {
  1818. unsigned int total_rx_bytes = 0, total_rx_packets = 0;
  1819. #ifdef IXGBE_FCOE
  1820. struct ixgbe_adapter *adapter = q_vector->adapter;
  1821. int ddp_bytes;
  1822. unsigned int mss = 0;
  1823. #endif /* IXGBE_FCOE */
  1824. u16 cleaned_count = ixgbe_desc_unused(rx_ring);
  1825. while (likely(total_rx_packets < budget)) {
  1826. union ixgbe_adv_rx_desc *rx_desc;
  1827. struct sk_buff *skb;
  1828. /* return some buffers to hardware, one at a time is too slow */
  1829. if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
  1830. ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
  1831. cleaned_count = 0;
  1832. }
  1833. rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
  1834. if (!rx_desc->wb.upper.status_error)
  1835. break;
  1836. /* This memory barrier is needed to keep us from reading
  1837. * any other fields out of the rx_desc until we know the
  1838. * descriptor has been written back
  1839. */
  1840. dma_rmb();
  1841. /* retrieve a buffer from the ring */
  1842. skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
  1843. /* exit if we failed to retrieve a buffer */
  1844. if (!skb)
  1845. break;
  1846. cleaned_count++;
  1847. /* place incomplete frames back on ring for completion */
  1848. if (ixgbe_is_non_eop(rx_ring, rx_desc, skb))
  1849. continue;
  1850. /* verify the packet layout is correct */
  1851. if (ixgbe_cleanup_headers(rx_ring, rx_desc, skb))
  1852. continue;
  1853. /* probably a little skewed due to removing CRC */
  1854. total_rx_bytes += skb->len;
  1855. /* populate checksum, timestamp, VLAN, and protocol */
  1856. ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
  1857. #ifdef IXGBE_FCOE
  1858. /* if ddp, not passing to ULD unless for FCP_RSP or error */
  1859. if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
  1860. ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
  1861. /* include DDPed FCoE data */
  1862. if (ddp_bytes > 0) {
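                                /* derive an approximate FCoE payload size
                                 * from the MTU once, rounded down to a
                                 * multiple of 512, so DDPed bytes can be
                                 * converted into a packet count below
                                 */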
  1863. if (!mss) {
  1864. mss = rx_ring->netdev->mtu -
  1865. sizeof(struct fcoe_hdr) -
  1866. sizeof(struct fc_frame_header) -
  1867. sizeof(struct fcoe_crc_eof);
  1868. if (mss > 512)
  1869. mss &= ~511;
  1870. }
  1871. total_rx_bytes += ddp_bytes;
  1872. total_rx_packets += DIV_ROUND_UP(ddp_bytes,
  1873. mss);
  1874. }
  1875. if (!ddp_bytes) {
  1876. dev_kfree_skb_any(skb);
  1877. continue;
  1878. }
  1879. }
  1880. #endif /* IXGBE_FCOE */
  1881. ixgbe_rx_skb(q_vector, skb);
  1882. /* update budget accounting */
  1883. total_rx_packets++;
  1884. }
  1885. u64_stats_update_begin(&rx_ring->syncp);
  1886. rx_ring->stats.packets += total_rx_packets;
  1887. rx_ring->stats.bytes += total_rx_bytes;
  1888. u64_stats_update_end(&rx_ring->syncp);
  1889. q_vector->rx.total_packets += total_rx_packets;
  1890. q_vector->rx.total_bytes += total_rx_bytes;
  1891. return total_rx_packets;
  1892. }
  1893. #ifdef CONFIG_NET_RX_BUSY_POLL
  1894. /* must be called with local_bh_disable()d */
  1895. static int ixgbe_low_latency_recv(struct napi_struct *napi)
  1896. {
  1897. struct ixgbe_q_vector *q_vector =
  1898. container_of(napi, struct ixgbe_q_vector, napi);
  1899. struct ixgbe_adapter *adapter = q_vector->adapter;
  1900. struct ixgbe_ring *ring;
  1901. int found = 0;
  1902. if (test_bit(__IXGBE_DOWN, &adapter->state))
  1903. return LL_FLUSH_FAILED;
  1904. if (!ixgbe_qv_lock_poll(q_vector))
  1905. return LL_FLUSH_BUSY;
  1906. ixgbe_for_each_ring(ring, q_vector->rx) {
  1907. found = ixgbe_clean_rx_irq(q_vector, ring, 4);
  1908. #ifdef BP_EXTENDED_STATS
  1909. if (found)
  1910. ring->stats.cleaned += found;
  1911. else
  1912. ring->stats.misses++;
  1913. #endif
  1914. if (found)
  1915. break;
  1916. }
  1917. ixgbe_qv_unlock_poll(q_vector);
  1918. return found;
  1919. }
  1920. #endif /* CONFIG_NET_RX_BUSY_POLL */
  1921. /**
  1922. * ixgbe_configure_msix - Configure MSI-X hardware
  1923. * @adapter: board private structure
  1924. *
  1925. * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
  1926. * interrupts.
  1927. **/
  1928. static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
  1929. {
  1930. struct ixgbe_q_vector *q_vector;
  1931. int v_idx;
  1932. u32 mask;
  1933. /* Populate MSIX to EITR Select */
  1934. if (adapter->num_vfs > 32) {
  1935. u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
  1936. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
  1937. }
  1938. /*
  1939. * Populate the IVAR table and set the ITR values to the
  1940. * corresponding register.
  1941. */
  1942. for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
  1943. struct ixgbe_ring *ring;
  1944. q_vector = adapter->q_vector[v_idx];
  1945. ixgbe_for_each_ring(ring, q_vector->rx)
  1946. ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
  1947. ixgbe_for_each_ring(ring, q_vector->tx)
  1948. ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
  1949. ixgbe_write_eitr(q_vector);
  1950. }
  1951. switch (adapter->hw.mac.type) {
  1952. case ixgbe_mac_82598EB:
  1953. ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
  1954. v_idx);
  1955. break;
  1956. case ixgbe_mac_82599EB:
  1957. case ixgbe_mac_X540:
  1958. case ixgbe_mac_X550:
  1959. case ixgbe_mac_X550EM_x:
  1960. case ixgbe_mac_x550em_a:
  1961. ixgbe_set_ivar(adapter, -1, 1, v_idx);
  1962. break;
  1963. default:
  1964. break;
  1965. }
  1966. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
  1967. /* set up to autoclear timer, and the vectors */
  1968. mask = IXGBE_EIMS_ENABLE_MASK;
  1969. mask &= ~(IXGBE_EIMS_OTHER |
  1970. IXGBE_EIMS_MAILBOX |
  1971. IXGBE_EIMS_LSC);
  1972. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
  1973. }
  1974. enum latency_range {
  1975. lowest_latency = 0,
  1976. low_latency = 1,
  1977. bulk_latency = 2,
  1978. latency_invalid = 255
  1979. };
  1980. /**
  1981. * ixgbe_update_itr - update the dynamic ITR value based on statistics
  1982. * @q_vector: structure containing interrupt and ring information
  1983. * @ring_container: structure containing ring performance data
  1984. *
  1985. * Stores a new ITR value based on packets and byte
  1986. * counts during the last interrupt. The advantage of per interrupt
  1987. * computation is faster updates and more accurate ITR for the current
  1988. * traffic pattern. Constants in this function were computed
  1989. * based on theoretical maximum wire speed and thresholds were set based
  1990. * on testing data as well as attempting to minimize response time
  1991. * while increasing bulk throughput.
1992. * This functionality is controlled by the InterruptThrottleRate module
  1993. * parameter (see ixgbe_param.c)
  1994. **/
  1995. static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
  1996. struct ixgbe_ring_container *ring_container)
  1997. {
  1998. int bytes = ring_container->total_bytes;
  1999. int packets = ring_container->total_packets;
  2000. u32 timepassed_us;
  2001. u64 bytes_perint;
  2002. u8 itr_setting = ring_container->itr;
  2003. if (packets == 0)
  2004. return;
2005. /* simple throttle rate management
  2006. * 0-10MB/s lowest (100000 ints/s)
  2007. * 10-20MB/s low (20000 ints/s)
  2008. * 20-1249MB/s bulk (12000 ints/s)
  2009. */
  2010. /* what was last interrupt timeslice? */
  2011. timepassed_us = q_vector->itr >> 2;
  2012. if (timepassed_us == 0)
  2013. return;
  2014. bytes_perint = bytes / timepassed_us; /* bytes/usec */
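/* bytes/usec is roughly MB/s, so the thresholds below correspond to
 * the 10MB/s and 20MB/s boundaries listed in the comment above
 */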
  2015. switch (itr_setting) {
  2016. case lowest_latency:
  2017. if (bytes_perint > 10)
  2018. itr_setting = low_latency;
  2019. break;
  2020. case low_latency:
  2021. if (bytes_perint > 20)
  2022. itr_setting = bulk_latency;
  2023. else if (bytes_perint <= 10)
  2024. itr_setting = lowest_latency;
  2025. break;
  2026. case bulk_latency:
  2027. if (bytes_perint <= 20)
  2028. itr_setting = low_latency;
  2029. break;
  2030. }
  2031. /* clear work counters since we have the values we need */
  2032. ring_container->total_bytes = 0;
  2033. ring_container->total_packets = 0;
  2034. /* write updated itr to ring container */
  2035. ring_container->itr = itr_setting;
  2036. }
  2037. /**
  2038. * ixgbe_write_eitr - write EITR register in hardware specific way
  2039. * @q_vector: structure containing interrupt and ring information
  2040. *
  2041. * This function is made to be called by ethtool and by the driver
  2042. * when it needs to update EITR registers at runtime. Hardware
  2043. * specific quirks/differences are taken care of here.
  2044. */
  2045. void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
  2046. {
  2047. struct ixgbe_adapter *adapter = q_vector->adapter;
  2048. struct ixgbe_hw *hw = &adapter->hw;
  2049. int v_idx = q_vector->v_idx;
  2050. u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
  2051. switch (adapter->hw.mac.type) {
  2052. case ixgbe_mac_82598EB:
  2053. /* must write high and low 16 bits to reset counter */
  2054. itr_reg |= (itr_reg << 16);
  2055. break;
  2056. case ixgbe_mac_82599EB:
  2057. case ixgbe_mac_X540:
  2058. case ixgbe_mac_X550:
  2059. case ixgbe_mac_X550EM_x:
  2060. case ixgbe_mac_x550em_a:
  2061. /*
  2062. * set the WDIS bit to not clear the timer bits and cause an
  2063. * immediate assertion of the interrupt
  2064. */
  2065. itr_reg |= IXGBE_EITR_CNT_WDIS;
  2066. break;
  2067. default:
  2068. break;
  2069. }
  2070. IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
  2071. }
  2072. static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
  2073. {
  2074. u32 new_itr = q_vector->itr;
  2075. u8 current_itr;
  2076. ixgbe_update_itr(q_vector, &q_vector->tx);
  2077. ixgbe_update_itr(q_vector, &q_vector->rx);
  2078. current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
  2079. switch (current_itr) {
  2080. /* counts and packets in update_itr are dependent on these numbers */
  2081. case lowest_latency:
  2082. new_itr = IXGBE_100K_ITR;
  2083. break;
  2084. case low_latency:
  2085. new_itr = IXGBE_20K_ITR;
  2086. break;
  2087. case bulk_latency:
  2088. new_itr = IXGBE_12K_ITR;
  2089. break;
  2090. default:
  2091. break;
  2092. }
  2093. if (new_itr != q_vector->itr) {
  2094. /* do an exponential smoothing */
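/* the weighted average below favors the previous value, so the ITR
 * drifts toward the new target over several updates rather than
 * jumping straight to it
 */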
  2095. new_itr = (10 * new_itr * q_vector->itr) /
  2096. ((9 * new_itr) + q_vector->itr);
  2097. /* save the algorithm value here */
  2098. q_vector->itr = new_itr;
  2099. ixgbe_write_eitr(q_vector);
  2100. }
  2101. }
  2102. /**
  2103. * ixgbe_check_overtemp_subtask - check for over temperature
  2104. * @adapter: pointer to adapter
  2105. **/
  2106. static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
  2107. {
  2108. struct ixgbe_hw *hw = &adapter->hw;
  2109. u32 eicr = adapter->interrupt_event;
  2110. if (test_bit(__IXGBE_DOWN, &adapter->state))
  2111. return;
  2112. if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
  2113. !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
  2114. return;
  2115. adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
  2116. switch (hw->device_id) {
  2117. case IXGBE_DEV_ID_82599_T3_LOM:
  2118. /*
2119. * Since the warning interrupt is shared by both ports,
2120. * we cannot tell whether it was meant for our port.
2121. * In addition, we may have missed the interrupt
2122. * altogether, so we always have to check whether
2123. * we got an LSC.
  2124. */
  2125. if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) &&
  2126. !(eicr & IXGBE_EICR_LSC))
  2127. return;
  2128. if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
  2129. u32 speed;
  2130. bool link_up = false;
  2131. hw->mac.ops.check_link(hw, &speed, &link_up, false);
  2132. if (link_up)
  2133. return;
  2134. }
2135. /* Return early if this event is not due to overtemp */
  2136. if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
  2137. return;
  2138. break;
  2139. default:
  2140. if (adapter->hw.mac.type >= ixgbe_mac_X540)
  2141. return;
  2142. if (!(eicr & IXGBE_EICR_GPI_SDP0(hw)))
  2143. return;
  2144. break;
  2145. }
  2146. e_crit(drv, "%s\n", ixgbe_overheat_msg);
  2147. adapter->interrupt_event = 0;
  2148. }
  2149. static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
  2150. {
  2151. struct ixgbe_hw *hw = &adapter->hw;
  2152. if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
  2153. (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
  2154. e_crit(probe, "Fan has stopped, replace the adapter\n");
  2155. /* write to clear the interrupt */
  2156. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
  2157. }
  2158. }
  2159. static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
  2160. {
  2161. struct ixgbe_hw *hw = &adapter->hw;
  2162. if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
  2163. return;
  2164. switch (adapter->hw.mac.type) {
  2165. case ixgbe_mac_82599EB:
  2166. /*
2167. * Need to check the link state, so complete the overtemp
2168. * check in the service task
  2169. */
  2170. if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) ||
  2171. (eicr & IXGBE_EICR_LSC)) &&
  2172. (!test_bit(__IXGBE_DOWN, &adapter->state))) {
  2173. adapter->interrupt_event = eicr;
  2174. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
  2175. ixgbe_service_event_schedule(adapter);
  2176. return;
  2177. }
  2178. return;
  2179. case ixgbe_mac_X540:
  2180. if (!(eicr & IXGBE_EICR_TS))
  2181. return;
  2182. break;
  2183. default:
  2184. return;
  2185. }
  2186. e_crit(drv, "%s\n", ixgbe_overheat_msg);
  2187. }
  2188. static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
  2189. {
  2190. switch (hw->mac.type) {
  2191. case ixgbe_mac_82598EB:
  2192. if (hw->phy.type == ixgbe_phy_nl)
  2193. return true;
  2194. return false;
  2195. case ixgbe_mac_82599EB:
  2196. case ixgbe_mac_X550EM_x:
  2197. case ixgbe_mac_x550em_a:
  2198. switch (hw->mac.ops.get_media_type(hw)) {
  2199. case ixgbe_media_type_fiber:
  2200. case ixgbe_media_type_fiber_qsfp:
  2201. return true;
  2202. default:
  2203. return false;
  2204. }
  2205. default:
  2206. return false;
  2207. }
  2208. }
  2209. static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
  2210. {
  2211. struct ixgbe_hw *hw = &adapter->hw;
  2212. u32 eicr_mask = IXGBE_EICR_GPI_SDP2(hw);
  2213. if (!ixgbe_is_sfp(hw))
  2214. return;
2215. /* Later MACs use a different SDP */
  2216. if (hw->mac.type >= ixgbe_mac_X540)
  2217. eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
  2218. if (eicr & eicr_mask) {
  2219. /* Clear the interrupt */
  2220. IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask);
  2221. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  2222. adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
  2223. adapter->sfp_poll_time = 0;
  2224. ixgbe_service_event_schedule(adapter);
  2225. }
  2226. }
  2227. if (adapter->hw.mac.type == ixgbe_mac_82599EB &&
  2228. (eicr & IXGBE_EICR_GPI_SDP1(hw))) {
  2229. /* Clear the interrupt */
  2230. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw));
  2231. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  2232. adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
  2233. ixgbe_service_event_schedule(adapter);
  2234. }
  2235. }
  2236. }
  2237. static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
  2238. {
  2239. struct ixgbe_hw *hw = &adapter->hw;
  2240. adapter->lsc_int++;
  2241. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  2242. adapter->link_check_timeout = jiffies;
  2243. if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
  2244. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
  2245. IXGBE_WRITE_FLUSH(hw);
  2246. ixgbe_service_event_schedule(adapter);
  2247. }
  2248. }
  2249. static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
  2250. u64 qmask)
  2251. {
  2252. u32 mask;
  2253. struct ixgbe_hw *hw = &adapter->hw;
  2254. switch (hw->mac.type) {
  2255. case ixgbe_mac_82598EB:
  2256. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  2257. IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
  2258. break;
  2259. case ixgbe_mac_82599EB:
  2260. case ixgbe_mac_X540:
  2261. case ixgbe_mac_X550:
  2262. case ixgbe_mac_X550EM_x:
  2263. case ixgbe_mac_x550em_a:
  2264. mask = (qmask & 0xFFFFFFFF);
  2265. if (mask)
  2266. IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
  2267. mask = (qmask >> 32);
  2268. if (mask)
  2269. IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
  2270. break;
  2271. default:
  2272. break;
  2273. }
  2274. /* skip the flush */
  2275. }
  2276. static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
  2277. u64 qmask)
  2278. {
  2279. u32 mask;
  2280. struct ixgbe_hw *hw = &adapter->hw;
  2281. switch (hw->mac.type) {
  2282. case ixgbe_mac_82598EB:
  2283. mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
  2284. IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
  2285. break;
  2286. case ixgbe_mac_82599EB:
  2287. case ixgbe_mac_X540:
  2288. case ixgbe_mac_X550:
  2289. case ixgbe_mac_X550EM_x:
  2290. case ixgbe_mac_x550em_a:
  2291. mask = (qmask & 0xFFFFFFFF);
  2292. if (mask)
  2293. IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
  2294. mask = (qmask >> 32);
  2295. if (mask)
  2296. IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
  2297. break;
  2298. default:
  2299. break;
  2300. }
  2301. /* skip the flush */
  2302. }
  2303. /**
  2304. * ixgbe_irq_enable - Enable default interrupt generation settings
  2305. * @adapter: board private structure
  2306. **/
  2307. static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
  2308. bool flush)
  2309. {
  2310. struct ixgbe_hw *hw = &adapter->hw;
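/* start from the default enable mask without the per-queue bits;
 * queue interrupts are (re)enabled separately below when requested
 */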
  2311. u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
  2312. /* don't reenable LSC while waiting for link */
  2313. if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
  2314. mask &= ~IXGBE_EIMS_LSC;
  2315. if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
  2316. switch (adapter->hw.mac.type) {
  2317. case ixgbe_mac_82599EB:
  2318. mask |= IXGBE_EIMS_GPI_SDP0(hw);
  2319. break;
  2320. case ixgbe_mac_X540:
  2321. case ixgbe_mac_X550:
  2322. case ixgbe_mac_X550EM_x:
  2323. case ixgbe_mac_x550em_a:
  2324. mask |= IXGBE_EIMS_TS;
  2325. break;
  2326. default:
  2327. break;
  2328. }
  2329. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
  2330. mask |= IXGBE_EIMS_GPI_SDP1(hw);
  2331. switch (adapter->hw.mac.type) {
  2332. case ixgbe_mac_82599EB:
  2333. mask |= IXGBE_EIMS_GPI_SDP1(hw);
  2334. mask |= IXGBE_EIMS_GPI_SDP2(hw);
  2335. /* fall through */
  2336. case ixgbe_mac_X540:
  2337. case ixgbe_mac_X550:
  2338. case ixgbe_mac_X550EM_x:
  2339. case ixgbe_mac_x550em_a:
  2340. if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
  2341. adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
  2342. adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
  2343. mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
  2344. if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
  2345. mask |= IXGBE_EICR_GPI_SDP0_X540;
  2346. mask |= IXGBE_EIMS_ECC;
  2347. mask |= IXGBE_EIMS_MAILBOX;
  2348. break;
  2349. default:
  2350. break;
  2351. }
  2352. if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
  2353. !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
  2354. mask |= IXGBE_EIMS_FLOW_DIR;
  2355. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
  2356. if (queues)
  2357. ixgbe_irq_enable_queues(adapter, ~0);
  2358. if (flush)
  2359. IXGBE_WRITE_FLUSH(&adapter->hw);
  2360. }
  2361. static irqreturn_t ixgbe_msix_other(int irq, void *data)
  2362. {
  2363. struct ixgbe_adapter *adapter = data;
  2364. struct ixgbe_hw *hw = &adapter->hw;
  2365. u32 eicr;
  2366. /*
  2367. * Workaround for Silicon errata. Use clear-by-write instead
  2368. * of clear-by-read. Reading with EICS will return the
2369. * interrupt causes without clearing them, which is later done
  2370. * with the write to EICR.
  2371. */
  2372. eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
2373. /* The lower 16 bits of the EICR register are for the queue interrupts
2374. * which should be masked here so that we do not accidentally clear them if
  2375. * the bits are high when ixgbe_msix_other is called. There is a race
  2376. * condition otherwise which results in possible performance loss
  2377. * especially if the ixgbe_msix_other interrupt is triggering
  2378. * consistently (as it would when PPS is turned on for the X540 device)
  2379. */
  2380. eicr &= 0xFFFF0000;
  2381. IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
  2382. if (eicr & IXGBE_EICR_LSC)
  2383. ixgbe_check_lsc(adapter);
  2384. if (eicr & IXGBE_EICR_MAILBOX)
  2385. ixgbe_msg_task(adapter);
  2386. switch (hw->mac.type) {
  2387. case ixgbe_mac_82599EB:
  2388. case ixgbe_mac_X540:
  2389. case ixgbe_mac_X550:
  2390. case ixgbe_mac_X550EM_x:
  2391. case ixgbe_mac_x550em_a:
  2392. if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
  2393. (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
  2394. adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
  2395. ixgbe_service_event_schedule(adapter);
  2396. IXGBE_WRITE_REG(hw, IXGBE_EICR,
  2397. IXGBE_EICR_GPI_SDP0_X540);
  2398. }
  2399. if (eicr & IXGBE_EICR_ECC) {
  2400. e_info(link, "Received ECC Err, initiating reset\n");
  2401. set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  2402. ixgbe_service_event_schedule(adapter);
  2403. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
  2404. }
  2405. /* Handle Flow Director Full threshold interrupt */
  2406. if (eicr & IXGBE_EICR_FLOW_DIR) {
  2407. int reinit_count = 0;
  2408. int i;
  2409. for (i = 0; i < adapter->num_tx_queues; i++) {
  2410. struct ixgbe_ring *ring = adapter->tx_ring[i];
  2411. if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
  2412. &ring->state))
  2413. reinit_count++;
  2414. }
  2415. if (reinit_count) {
  2416. /* no more flow director interrupts until after init */
  2417. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
  2418. adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
  2419. ixgbe_service_event_schedule(adapter);
  2420. }
  2421. }
  2422. ixgbe_check_sfp_event(adapter, eicr);
  2423. ixgbe_check_overtemp_event(adapter, eicr);
  2424. break;
  2425. default:
  2426. break;
  2427. }
  2428. ixgbe_check_fan_failure(adapter, eicr);
  2429. if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
  2430. ixgbe_ptp_check_pps_event(adapter);
  2431. /* re-enable the original interrupt state, no lsc, no queues */
  2432. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2433. ixgbe_irq_enable(adapter, false, false);
  2434. return IRQ_HANDLED;
  2435. }
  2436. static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
  2437. {
  2438. struct ixgbe_q_vector *q_vector = data;
  2439. /* EIAM disabled interrupts (on this vector) for us */
  2440. if (q_vector->rx.ring || q_vector->tx.ring)
  2441. napi_schedule_irqoff(&q_vector->napi);
  2442. return IRQ_HANDLED;
  2443. }
  2444. /**
  2445. * ixgbe_poll - NAPI Rx polling callback
  2446. * @napi: structure for representing this polling device
2447. * @budget: how many packets the driver is allowed to clean
  2448. *
2449. * This function is the NAPI polling callback used in legacy, MSI and MSI-X modes
  2450. **/
  2451. int ixgbe_poll(struct napi_struct *napi, int budget)
  2452. {
  2453. struct ixgbe_q_vector *q_vector =
  2454. container_of(napi, struct ixgbe_q_vector, napi);
  2455. struct ixgbe_adapter *adapter = q_vector->adapter;
  2456. struct ixgbe_ring *ring;
  2457. int per_ring_budget, work_done = 0;
  2458. bool clean_complete = true;
  2459. #ifdef CONFIG_IXGBE_DCA
  2460. if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
  2461. ixgbe_update_dca(q_vector);
  2462. #endif
  2463. ixgbe_for_each_ring(ring, q_vector->tx) {
  2464. if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
  2465. clean_complete = false;
  2466. }
  2467. /* Exit if we are called by netpoll or busy polling is active */
  2468. if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
  2469. return budget;
  2470. /* attempt to distribute budget to each queue fairly, but don't allow
  2471. * the budget to go below 1 because we'll exit polling */
  2472. if (q_vector->rx.count > 1)
  2473. per_ring_budget = max(budget/q_vector->rx.count, 1);
  2474. else
  2475. per_ring_budget = budget;
  2476. ixgbe_for_each_ring(ring, q_vector->rx) {
  2477. int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
  2478. per_ring_budget);
  2479. work_done += cleaned;
  2480. if (cleaned >= per_ring_budget)
  2481. clean_complete = false;
  2482. }
  2483. ixgbe_qv_unlock_napi(q_vector);
  2484. /* If all work not completed, return budget and keep polling */
  2485. if (!clean_complete)
  2486. return budget;
  2487. /* all work done, exit the polling mode */
  2488. napi_complete_done(napi, work_done);
  2489. if (adapter->rx_itr_setting & 1)
  2490. ixgbe_set_itr(q_vector);
  2491. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2492. ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
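/* napi_complete_done() has already been called, so never report the
 * full budget back to the caller
 */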
  2493. return min(work_done, budget - 1);
  2494. }
  2495. /**
  2496. * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
  2497. * @adapter: board private structure
  2498. *
  2499. * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
  2500. * interrupts from the kernel.
  2501. **/
  2502. static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
  2503. {
  2504. struct net_device *netdev = adapter->netdev;
  2505. int vector, err;
  2506. int ri = 0, ti = 0;
  2507. for (vector = 0; vector < adapter->num_q_vectors; vector++) {
  2508. struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
  2509. struct msix_entry *entry = &adapter->msix_entries[vector];
  2510. if (q_vector->tx.ring && q_vector->rx.ring) {
  2511. snprintf(q_vector->name, sizeof(q_vector->name) - 1,
  2512. "%s-%s-%d", netdev->name, "TxRx", ri++);
  2513. ti++;
  2514. } else if (q_vector->rx.ring) {
  2515. snprintf(q_vector->name, sizeof(q_vector->name) - 1,
  2516. "%s-%s-%d", netdev->name, "rx", ri++);
  2517. } else if (q_vector->tx.ring) {
  2518. snprintf(q_vector->name, sizeof(q_vector->name) - 1,
  2519. "%s-%s-%d", netdev->name, "tx", ti++);
  2520. } else {
  2521. /* skip this unused q_vector */
  2522. continue;
  2523. }
  2524. err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
  2525. q_vector->name, q_vector);
  2526. if (err) {
  2527. e_err(probe, "request_irq failed for MSIX interrupt "
  2528. "Error: %d\n", err);
  2529. goto free_queue_irqs;
  2530. }
  2531. /* If Flow Director is enabled, set interrupt affinity */
  2532. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  2533. /* assign the mask for this irq */
  2534. irq_set_affinity_hint(entry->vector,
  2535. &q_vector->affinity_mask);
  2536. }
  2537. }
  2538. err = request_irq(adapter->msix_entries[vector].vector,
  2539. ixgbe_msix_other, 0, netdev->name, adapter);
  2540. if (err) {
  2541. e_err(probe, "request_irq for msix_other failed: %d\n", err);
  2542. goto free_queue_irqs;
  2543. }
  2544. return 0;
  2545. free_queue_irqs:
  2546. while (vector) {
  2547. vector--;
  2548. irq_set_affinity_hint(adapter->msix_entries[vector].vector,
  2549. NULL);
  2550. free_irq(adapter->msix_entries[vector].vector,
  2551. adapter->q_vector[vector]);
  2552. }
  2553. adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
  2554. pci_disable_msix(adapter->pdev);
  2555. kfree(adapter->msix_entries);
  2556. adapter->msix_entries = NULL;
  2557. return err;
  2558. }
  2559. /**
  2560. * ixgbe_intr - legacy mode Interrupt Handler
  2561. * @irq: interrupt number
  2562. * @data: pointer to a network interface device structure
  2563. **/
  2564. static irqreturn_t ixgbe_intr(int irq, void *data)
  2565. {
  2566. struct ixgbe_adapter *adapter = data;
  2567. struct ixgbe_hw *hw = &adapter->hw;
  2568. struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
  2569. u32 eicr;
  2570. /*
  2571. * Workaround for silicon errata #26 on 82598. Mask the interrupt
  2572. * before the read of EICR.
  2573. */
  2574. IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
2575. /* for NAPI we use EIAM to auto-mask the tx/rx interrupt bits on read,
2576. * therefore no explicit interrupt disable is necessary */
  2577. eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
  2578. if (!eicr) {
  2579. /*
  2580. * shared interrupt alert!
  2581. * make sure interrupts are enabled because the read will
  2582. * have disabled interrupts due to EIAM
  2583. * finish the workaround of silicon errata on 82598. Unmask
  2584. * the interrupt that we masked before the EICR read.
  2585. */
  2586. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2587. ixgbe_irq_enable(adapter, true, true);
  2588. return IRQ_NONE; /* Not our interrupt */
  2589. }
  2590. if (eicr & IXGBE_EICR_LSC)
  2591. ixgbe_check_lsc(adapter);
  2592. switch (hw->mac.type) {
  2593. case ixgbe_mac_82599EB:
  2594. ixgbe_check_sfp_event(adapter, eicr);
  2595. /* Fall through */
  2596. case ixgbe_mac_X540:
  2597. case ixgbe_mac_X550:
  2598. case ixgbe_mac_X550EM_x:
  2599. case ixgbe_mac_x550em_a:
  2600. if (eicr & IXGBE_EICR_ECC) {
  2601. e_info(link, "Received ECC Err, initiating reset\n");
  2602. set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  2603. ixgbe_service_event_schedule(adapter);
  2604. IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
  2605. }
  2606. ixgbe_check_overtemp_event(adapter, eicr);
  2607. break;
  2608. default:
  2609. break;
  2610. }
  2611. ixgbe_check_fan_failure(adapter, eicr);
  2612. if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
  2613. ixgbe_ptp_check_pps_event(adapter);
  2614. /* would disable interrupts here but EIAM disabled it */
  2615. napi_schedule_irqoff(&q_vector->napi);
  2616. /*
  2617. * re-enable link(maybe) and non-queue interrupts, no flush.
  2618. * ixgbe_poll will re-enable the queue interrupts
  2619. */
  2620. if (!test_bit(__IXGBE_DOWN, &adapter->state))
  2621. ixgbe_irq_enable(adapter, false, false);
  2622. return IRQ_HANDLED;
  2623. }
  2624. /**
  2625. * ixgbe_request_irq - initialize interrupts
  2626. * @adapter: board private structure
  2627. *
  2628. * Attempts to configure interrupts using the best available
  2629. * capabilities of the hardware and kernel.
  2630. **/
  2631. static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
  2632. {
  2633. struct net_device *netdev = adapter->netdev;
  2634. int err;
  2635. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  2636. err = ixgbe_request_msix_irqs(adapter);
  2637. else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
  2638. err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
  2639. netdev->name, adapter);
  2640. else
  2641. err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
  2642. netdev->name, adapter);
  2643. if (err)
  2644. e_err(probe, "request_irq failed, Error %d\n", err);
  2645. return err;
  2646. }
  2647. static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  2648. {
  2649. int vector;
  2650. if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
  2651. free_irq(adapter->pdev->irq, adapter);
  2652. return;
  2653. }
  2654. for (vector = 0; vector < adapter->num_q_vectors; vector++) {
  2655. struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
  2656. struct msix_entry *entry = &adapter->msix_entries[vector];
  2657. /* free only the irqs that were actually requested */
  2658. if (!q_vector->rx.ring && !q_vector->tx.ring)
  2659. continue;
  2660. /* clear the affinity_mask in the IRQ descriptor */
  2661. irq_set_affinity_hint(entry->vector, NULL);
  2662. free_irq(entry->vector, q_vector);
  2663. }
  2664. free_irq(adapter->msix_entries[vector].vector, adapter);
  2665. }
  2666. /**
  2667. * ixgbe_irq_disable - Mask off interrupt generation on the NIC
  2668. * @adapter: board private structure
  2669. **/
  2670. static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  2671. {
  2672. switch (adapter->hw.mac.type) {
  2673. case ixgbe_mac_82598EB:
  2674. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
  2675. break;
  2676. case ixgbe_mac_82599EB:
  2677. case ixgbe_mac_X540:
  2678. case ixgbe_mac_X550:
  2679. case ixgbe_mac_X550EM_x:
  2680. case ixgbe_mac_x550em_a:
  2681. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
  2682. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
  2683. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
  2684. break;
  2685. default:
  2686. break;
  2687. }
  2688. IXGBE_WRITE_FLUSH(&adapter->hw);
  2689. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  2690. int vector;
  2691. for (vector = 0; vector < adapter->num_q_vectors; vector++)
  2692. synchronize_irq(adapter->msix_entries[vector].vector);
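/* the entry after the queue vectors is the "other causes" vector */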
  2693. synchronize_irq(adapter->msix_entries[vector++].vector);
  2694. } else {
  2695. synchronize_irq(adapter->pdev->irq);
  2696. }
  2697. }
  2698. /**
  2699. * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
2700. * @adapter: board private structure
  2701. **/
  2702. static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
  2703. {
  2704. struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
  2705. ixgbe_write_eitr(q_vector);
  2706. ixgbe_set_ivar(adapter, 0, 0, 0);
  2707. ixgbe_set_ivar(adapter, 1, 0, 0);
  2708. e_info(hw, "Legacy interrupt IVAR setup done\n");
  2709. }
  2710. /**
  2711. * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
  2712. * @adapter: board private structure
  2713. * @ring: structure containing ring specific data
  2714. *
  2715. * Configure the Tx descriptor ring after a reset.
  2716. **/
  2717. void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
  2718. struct ixgbe_ring *ring)
  2719. {
  2720. struct ixgbe_hw *hw = &adapter->hw;
  2721. u64 tdba = ring->dma;
  2722. int wait_loop = 10;
  2723. u32 txdctl = IXGBE_TXDCTL_ENABLE;
  2724. u8 reg_idx = ring->reg_idx;
  2725. /* disable queue to avoid issues while updating state */
  2726. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
  2727. IXGBE_WRITE_FLUSH(hw);
  2728. IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
  2729. (tdba & DMA_BIT_MASK(32)));
  2730. IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
  2731. IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
  2732. ring->count * sizeof(union ixgbe_adv_tx_desc));
  2733. IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
  2734. IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
  2735. ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx);
  2736. /*
  2737. * set WTHRESH to encourage burst writeback, it should not be set
  2738. * higher than 1 when:
  2739. * - ITR is 0 as it could cause false TX hangs
  2740. * - ITR is set to > 100k int/sec and BQL is enabled
  2741. *
  2742. * In order to avoid issues WTHRESH + PTHRESH should always be equal
  2743. * to or less than the number of on chip descriptors, which is
  2744. * currently 40.
  2745. */
  2746. if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
  2747. txdctl |= 1u << 16; /* WTHRESH = 1 */
  2748. else
  2749. txdctl |= 8u << 16; /* WTHRESH = 8 */
  2750. /*
  2751. * Setting PTHRESH to 32 both improves performance
  2752. * and avoids a TX hang with DFP enabled
  2753. */
  2754. txdctl |= (1u << 8) | /* HTHRESH = 1 */
  2755. 32; /* PTHRESH = 32 */
  2756. /* reinitialize flowdirector state */
  2757. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  2758. ring->atr_sample_rate = adapter->atr_sample_rate;
  2759. ring->atr_count = 0;
  2760. set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
  2761. } else {
  2762. ring->atr_sample_rate = 0;
  2763. }
  2764. /* initialize XPS */
  2765. if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
  2766. struct ixgbe_q_vector *q_vector = ring->q_vector;
  2767. if (q_vector)
  2768. netif_set_xps_queue(ring->netdev,
  2769. &q_vector->affinity_mask,
  2770. ring->queue_index);
  2771. }
  2772. clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
  2773. /* enable queue */
  2774. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
  2775. /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
  2776. if (hw->mac.type == ixgbe_mac_82598EB &&
  2777. !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
  2778. return;
  2779. /* poll to verify queue is enabled */
  2780. do {
  2781. usleep_range(1000, 2000);
  2782. txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
  2783. } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
  2784. if (!wait_loop)
  2785. hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
  2786. }
  2787. static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
  2788. {
  2789. struct ixgbe_hw *hw = &adapter->hw;
  2790. u32 rttdcs, mtqc;
  2791. u8 tcs = netdev_get_num_tc(adapter->netdev);
  2792. if (hw->mac.type == ixgbe_mac_82598EB)
  2793. return;
  2794. /* disable the arbiter while setting MTQC */
  2795. rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
  2796. rttdcs |= IXGBE_RTTDCS_ARBDIS;
  2797. IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
  2798. /* set transmit pool layout */
  2799. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  2800. mtqc = IXGBE_MTQC_VT_ENA;
  2801. if (tcs > 4)
  2802. mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
  2803. else if (tcs > 1)
  2804. mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
  2805. else if (adapter->ring_feature[RING_F_VMDQ].mask ==
  2806. IXGBE_82599_VMDQ_4Q_MASK)
  2807. mtqc |= IXGBE_MTQC_32VF;
  2808. else
  2809. mtqc |= IXGBE_MTQC_64VF;
  2810. } else {
  2811. if (tcs > 4)
  2812. mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
  2813. else if (tcs > 1)
  2814. mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
  2815. else
  2816. mtqc = IXGBE_MTQC_64Q_1PB;
  2817. }
  2818. IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
  2819. /* Enable Security TX Buffer IFG for multiple pb */
  2820. if (tcs) {
  2821. u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
  2822. sectx |= IXGBE_SECTX_DCB;
  2823. IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
  2824. }
  2825. /* re-enable the arbiter */
  2826. rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
  2827. IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
  2828. }
  2829. /**
  2830. * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
  2831. * @adapter: board private structure
  2832. *
  2833. * Configure the Tx unit of the MAC after a reset.
  2834. **/
  2835. static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
  2836. {
  2837. struct ixgbe_hw *hw = &adapter->hw;
  2838. u32 dmatxctl;
  2839. u32 i;
  2840. ixgbe_setup_mtqc(adapter);
  2841. if (hw->mac.type != ixgbe_mac_82598EB) {
  2842. /* DMATXCTL.EN must be before Tx queues are enabled */
  2843. dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
  2844. dmatxctl |= IXGBE_DMATXCTL_TE;
  2845. IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
  2846. }
  2847. /* Setup the HW Tx Head and Tail descriptor pointers */
  2848. for (i = 0; i < adapter->num_tx_queues; i++)
  2849. ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
  2850. }
  2851. static void ixgbe_enable_rx_drop(struct ixgbe_adapter *adapter,
  2852. struct ixgbe_ring *ring)
  2853. {
  2854. struct ixgbe_hw *hw = &adapter->hw;
  2855. u8 reg_idx = ring->reg_idx;
  2856. u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
  2857. srrctl |= IXGBE_SRRCTL_DROP_EN;
  2858. IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
  2859. }
  2860. static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter,
  2861. struct ixgbe_ring *ring)
  2862. {
  2863. struct ixgbe_hw *hw = &adapter->hw;
  2864. u8 reg_idx = ring->reg_idx;
  2865. u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(reg_idx));
  2866. srrctl &= ~IXGBE_SRRCTL_DROP_EN;
  2867. IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
  2868. }
  2869. #ifdef CONFIG_IXGBE_DCB
  2870. void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
  2871. #else
  2872. static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
  2873. #endif
  2874. {
  2875. int i;
  2876. bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
  2877. if (adapter->ixgbe_ieee_pfc)
  2878. pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
  2879. /*
  2880. * We should set the drop enable bit if:
  2881. * SR-IOV is enabled
  2882. * or
  2883. * Number of Rx queues > 1 and flow control is disabled
  2884. *
  2885. * This allows us to avoid head of line blocking for security
  2886. * and performance reasons.
  2887. */
  2888. if (adapter->num_vfs || (adapter->num_rx_queues > 1 &&
  2889. !(adapter->hw.fc.current_mode & ixgbe_fc_tx_pause) && !pfc_en)) {
  2890. for (i = 0; i < adapter->num_rx_queues; i++)
  2891. ixgbe_enable_rx_drop(adapter, adapter->rx_ring[i]);
  2892. } else {
  2893. for (i = 0; i < adapter->num_rx_queues; i++)
  2894. ixgbe_disable_rx_drop(adapter, adapter->rx_ring[i]);
  2895. }
  2896. }
  2897. #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
  2898. static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
  2899. struct ixgbe_ring *rx_ring)
  2900. {
  2901. struct ixgbe_hw *hw = &adapter->hw;
  2902. u32 srrctl;
  2903. u8 reg_idx = rx_ring->reg_idx;
  2904. if (hw->mac.type == ixgbe_mac_82598EB) {
  2905. u16 mask = adapter->ring_feature[RING_F_RSS].mask;
  2906. /*
  2907. * if VMDq is not active we must program one srrctl register
  2908. * per RSS queue since we have enabled RDRXCTL.MVMEN
  2909. */
  2910. reg_idx &= mask;
  2911. }
  2912. /* configure header buffer length, needed for RSC */
  2913. srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
  2914. /* configure the packet buffer length */
  2915. srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
  2916. /* configure descriptor type */
  2917. srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
  2918. IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
  2919. }
  2920. /**
  2921. * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
  2922. * @adapter: device handle
  2923. *
  2924. * - 82598/82599/X540: 128
  2925. * - X550(non-SRIOV mode): 512
  2926. * - X550(SRIOV mode): 64
  2927. */
  2928. u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
  2929. {
  2930. if (adapter->hw.mac.type < ixgbe_mac_X550)
  2931. return 128;
  2932. else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  2933. return 64;
  2934. else
  2935. return 512;
  2936. }
  2937. /**
  2938. * ixgbe_store_reta - Write the RETA table to HW
  2939. * @adapter: device handle
  2940. *
  2941. * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
  2942. */
  2943. void ixgbe_store_reta(struct ixgbe_adapter *adapter)
  2944. {
  2945. u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
  2946. struct ixgbe_hw *hw = &adapter->hw;
  2947. u32 reta = 0;
  2948. u32 indices_multi;
  2949. u8 *indir_tbl = adapter->rss_indir_tbl;
  2950. /* Fill out the redirection table as follows:
  2951. * - 82598: 8 bit wide entries containing pair of 4 bit RSS
  2952. * indices.
  2953. * - 82599/X540: 8 bit wide entries containing 4 bit RSS index
  2954. * - X550: 8 bit wide entries containing 6 bit RSS index
  2955. */
  2956. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  2957. indices_multi = 0x11;
  2958. else
  2959. indices_multi = 0x1;
  2960. /* Write redirection table to HW */
  2961. for (i = 0; i < reta_entries; i++) {
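/* four 8-bit entries are packed into each 32-bit register; on 82598
 * the 0x11 multiplier duplicates the 4-bit index into both nibbles of
 * an entry, and entries past 127 go to the extended ERETA registers
 */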
  2962. reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8;
  2963. if ((i & 3) == 3) {
  2964. if (i < 128)
  2965. IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
  2966. else
  2967. IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
  2968. reta);
  2969. reta = 0;
  2970. }
  2971. }
  2972. }
  2973. /**
  2974. * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
  2975. * @adapter: device handle
  2976. *
  2977. * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
  2978. */
  2979. static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter)
  2980. {
  2981. u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
  2982. struct ixgbe_hw *hw = &adapter->hw;
  2983. u32 vfreta = 0;
  2984. unsigned int pf_pool = adapter->num_vfs;
  2985. /* Write redirection table to HW */
  2986. for (i = 0; i < reta_entries; i++) {
  2987. vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8;
  2988. if ((i & 3) == 3) {
  2989. IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool),
  2990. vfreta);
  2991. vfreta = 0;
  2992. }
  2993. }
  2994. }
  2995. static void ixgbe_setup_reta(struct ixgbe_adapter *adapter)
  2996. {
  2997. struct ixgbe_hw *hw = &adapter->hw;
  2998. u32 i, j;
  2999. u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);
  3000. u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
  3001. /* Program table for at least 4 queues w/ SR-IOV so that VFs can
  3002. * make full use of any rings they may have. We will use the
  3003. * PSRTYPE register to control how many rings we use within the PF.
  3004. */
  3005. if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4))
  3006. rss_i = 4;
  3007. /* Fill out hash function seeds */
  3008. for (i = 0; i < 10; i++)
  3009. IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]);
  3010. /* Fill out redirection table */
  3011. memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl));
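/* spread the table entries across the rss_i queues in round-robin order */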
  3012. for (i = 0, j = 0; i < reta_entries; i++, j++) {
  3013. if (j == rss_i)
  3014. j = 0;
  3015. adapter->rss_indir_tbl[i] = j;
  3016. }
  3017. ixgbe_store_reta(adapter);
  3018. }
  3019. static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter)
  3020. {
  3021. struct ixgbe_hw *hw = &adapter->hw;
  3022. u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
  3023. unsigned int pf_pool = adapter->num_vfs;
  3024. int i, j;
  3025. /* Fill out hash function seeds */
  3026. for (i = 0; i < 10; i++)
  3027. IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool),
  3028. adapter->rss_key[i]);
  3029. /* Fill out the redirection table */
  3030. for (i = 0, j = 0; i < 64; i++, j++) {
  3031. if (j == rss_i)
  3032. j = 0;
  3033. adapter->rss_indir_tbl[i] = j;
  3034. }
  3035. ixgbe_store_vfreta(adapter);
  3036. }
  3037. static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
  3038. {
  3039. struct ixgbe_hw *hw = &adapter->hw;
  3040. u32 mrqc = 0, rss_field = 0, vfmrqc = 0;
  3041. u32 rxcsum;
  3042. /* Disable indicating checksum in descriptor, enables RSS hash */
  3043. rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
  3044. rxcsum |= IXGBE_RXCSUM_PCSD;
  3045. IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
  3046. if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
  3047. if (adapter->ring_feature[RING_F_RSS].mask)
  3048. mrqc = IXGBE_MRQC_RSSEN;
  3049. } else {
  3050. u8 tcs = netdev_get_num_tc(adapter->netdev);
  3051. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  3052. if (tcs > 4)
  3053. mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
  3054. else if (tcs > 1)
  3055. mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
  3056. else if (adapter->ring_feature[RING_F_VMDQ].mask ==
  3057. IXGBE_82599_VMDQ_4Q_MASK)
  3058. mrqc = IXGBE_MRQC_VMDQRSS32EN;
  3059. else
  3060. mrqc = IXGBE_MRQC_VMDQRSS64EN;
  3061. } else {
  3062. if (tcs > 4)
  3063. mrqc = IXGBE_MRQC_RTRSS8TCEN;
  3064. else if (tcs > 1)
  3065. mrqc = IXGBE_MRQC_RTRSS4TCEN;
  3066. else
  3067. mrqc = IXGBE_MRQC_RSSEN;
  3068. }
  3069. }
  3070. /* Perform hash on these packet types */
  3071. rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4 |
  3072. IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
  3073. IXGBE_MRQC_RSS_FIELD_IPV6 |
  3074. IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
  3075. if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
  3076. rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
  3077. if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
  3078. rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
  3079. netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
  3080. if ((hw->mac.type >= ixgbe_mac_X550) &&
  3081. (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
  3082. unsigned int pf_pool = adapter->num_vfs;
  3083. /* Enable VF RSS mode */
  3084. mrqc |= IXGBE_MRQC_MULTIPLE_RSS;
  3085. IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  3086. /* Setup RSS through the VF registers */
  3087. ixgbe_setup_vfreta(adapter);
  3088. vfmrqc = IXGBE_MRQC_RSSEN;
  3089. vfmrqc |= rss_field;
  3090. IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc);
  3091. } else {
  3092. ixgbe_setup_reta(adapter);
  3093. mrqc |= rss_field;
  3094. IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
  3095. }
  3096. }
  3097. /**
  3098. * ixgbe_configure_rscctl - enable RSC for the indicated ring
  3099. * @adapter: address of board private structure
3100. * @ring: ring on which to enable RSC
  3101. **/
  3102. static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
  3103. struct ixgbe_ring *ring)
  3104. {
  3105. struct ixgbe_hw *hw = &adapter->hw;
  3106. u32 rscctrl;
  3107. u8 reg_idx = ring->reg_idx;
  3108. if (!ring_is_rsc_enabled(ring))
  3109. return;
  3110. rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
  3111. rscctrl |= IXGBE_RSCCTL_RSCEN;
  3112. /*
  3113. * we must limit the number of descriptors so that the
  3114. * total size of max desc * buf_len is not greater
  3115. * than 65536
  3116. */
  3117. rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
  3118. IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
  3119. }
  3120. #define IXGBE_MAX_RX_DESC_POLL 10
  3121. static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
  3122. struct ixgbe_ring *ring)
  3123. {
  3124. struct ixgbe_hw *hw = &adapter->hw;
  3125. int wait_loop = IXGBE_MAX_RX_DESC_POLL;
  3126. u32 rxdctl;
  3127. u8 reg_idx = ring->reg_idx;
  3128. if (ixgbe_removed(hw->hw_addr))
  3129. return;
  3130. /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
  3131. if (hw->mac.type == ixgbe_mac_82598EB &&
  3132. !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
  3133. return;
  3134. do {
  3135. usleep_range(1000, 2000);
  3136. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3137. } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
  3138. if (!wait_loop) {
  3139. e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
  3140. "the polling period\n", reg_idx);
  3141. }
  3142. }
  3143. void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
  3144. struct ixgbe_ring *ring)
  3145. {
  3146. struct ixgbe_hw *hw = &adapter->hw;
  3147. int wait_loop = IXGBE_MAX_RX_DESC_POLL;
  3148. u32 rxdctl;
  3149. u8 reg_idx = ring->reg_idx;
  3150. if (ixgbe_removed(hw->hw_addr))
  3151. return;
  3152. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3153. rxdctl &= ~IXGBE_RXDCTL_ENABLE;
  3154. /* write value back with RXDCTL.ENABLE bit cleared */
  3155. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
  3156. if (hw->mac.type == ixgbe_mac_82598EB &&
  3157. !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
  3158. return;
  3159. /* the hardware may take up to 100us to really disable the rx queue */
  3160. do {
  3161. udelay(10);
  3162. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3163. } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
  3164. if (!wait_loop) {
  3165. e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
  3166. "the polling period\n", reg_idx);
  3167. }
  3168. }
  3169. void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
  3170. struct ixgbe_ring *ring)
  3171. {
  3172. struct ixgbe_hw *hw = &adapter->hw;
  3173. u64 rdba = ring->dma;
  3174. u32 rxdctl;
  3175. u8 reg_idx = ring->reg_idx;
  3176. /* disable queue to avoid issues while updating state */
  3177. rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
  3178. ixgbe_disable_rx_queue(adapter, ring);
  3179. IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
  3180. IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
  3181. IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
  3182. ring->count * sizeof(union ixgbe_adv_rx_desc));
  3183. /* Force flushing of IXGBE_RDLEN to prevent MDD */
  3184. IXGBE_WRITE_FLUSH(hw);
  3185. IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
  3186. IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
  3187. ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
  3188. ixgbe_configure_srrctl(adapter, ring);
  3189. ixgbe_configure_rscctl(adapter, ring);
  3190. if (hw->mac.type == ixgbe_mac_82598EB) {
  3191. /*
  3192. * enable cache line friendly hardware writes:
  3193. * PTHRESH=32 descriptors (half the internal cache),
  3194. * this also removes ugly rx_no_buffer_count increment
  3195. * HTHRESH=4 descriptors (to minimize latency on fetch)
  3196. * WTHRESH=8 burst writeback up to two cache lines
  3197. */
  3198. rxdctl &= ~0x3FFFFF;
  3199. rxdctl |= 0x080420;
  3200. }
  3201. /* enable receive descriptor ring */
  3202. rxdctl |= IXGBE_RXDCTL_ENABLE;
  3203. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
  3204. ixgbe_rx_desc_queue_enable(adapter, ring);
  3205. ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
  3206. }
  3207. static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
  3208. {
  3209. struct ixgbe_hw *hw = &adapter->hw;
  3210. int rss_i = adapter->ring_feature[RING_F_RSS].indices;
  3211. u16 pool;
  3212. /* PSRTYPE must be initialized in non 82598 adapters */
  3213. u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
  3214. IXGBE_PSRTYPE_UDPHDR |
  3215. IXGBE_PSRTYPE_IPV4HDR |
  3216. IXGBE_PSRTYPE_L2HDR |
  3217. IXGBE_PSRTYPE_IPV6HDR;
  3218. if (hw->mac.type == ixgbe_mac_82598EB)
  3219. return;
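/* bits 31:29 appear to encode log2 of the number of RSS queues used
 * per pool (2 for four queues, 1 for two queues)
 */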
  3220. if (rss_i > 3)
  3221. psrtype |= 2u << 29;
  3222. else if (rss_i > 1)
  3223. psrtype |= 1u << 29;
  3224. for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
  3225. IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
  3226. }
  3227. static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
  3228. {
  3229. struct ixgbe_hw *hw = &adapter->hw;
  3230. u32 reg_offset, vf_shift;
  3231. u32 gcr_ext, vmdctl;
  3232. int i;
  3233. if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
  3234. return;
  3235. vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
  3236. vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
  3237. vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
  3238. vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
  3239. vmdctl |= IXGBE_VT_CTL_REPLEN;
  3240. IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
  3241. vf_shift = VMDQ_P(0) % 32;
  3242. reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
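/* pool enable bits are split across two 32-bit registers; reg_offset
 * selects the register holding the PF pool's bit and vf_shift is its
 * bit position within that register
 */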
  3243. /* Enable only the PF's pool for Tx/Rx */
  3244. IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
  3245. IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
  3246. IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
  3247. IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
  3248. if (adapter->bridge_mode == BRIDGE_MODE_VEB)
  3249. IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
  3250. /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
  3251. hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
  3252. /* clear VLAN promisc flag so VFTA will be updated if necessary */
  3253. adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
  3254. /*
  3255. * Set up VF register offsets for selected VT Mode,
  3256. * i.e. 32 or 64 VFs for SR-IOV
  3257. */
  3258. switch (adapter->ring_feature[RING_F_VMDQ].mask) {
  3259. case IXGBE_82599_VMDQ_8Q_MASK:
  3260. gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
  3261. break;
  3262. case IXGBE_82599_VMDQ_4Q_MASK:
  3263. gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
  3264. break;
  3265. default:
  3266. gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
  3267. break;
  3268. }
  3269. IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
  3270. for (i = 0; i < adapter->num_vfs; i++) {
  3271. /* configure spoof checking */
  3272. ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
  3273. adapter->vfinfo[i].spoofchk_enabled);
  3274. /* Enable/Disable RSS query feature */
  3275. ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
  3276. adapter->vfinfo[i].rss_query_enabled);
  3277. }
  3278. }
  3279. static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
  3280. {
  3281. struct ixgbe_hw *hw = &adapter->hw;
  3282. struct net_device *netdev = adapter->netdev;
  3283. int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
  3284. struct ixgbe_ring *rx_ring;
  3285. int i;
  3286. u32 mhadd, hlreg0;
  3287. #ifdef IXGBE_FCOE
  3288. /* adjust max frame to be able to do baby jumbo for FCoE */
  3289. if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
  3290. (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
  3291. max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
  3292. #endif /* IXGBE_FCOE */
  3293. /* adjust max frame to be at least the size of a standard frame */
  3294. if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
  3295. max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
  3296. mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
  3297. if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
  3298. mhadd &= ~IXGBE_MHADD_MFS_MASK;
  3299. mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
  3300. IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
  3301. }
  3302. hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
  3303. /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
  3304. hlreg0 |= IXGBE_HLREG0_JUMBOEN;
  3305. IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
  3306. /*
  3307. * Setup the HW Rx Head and Tail Descriptor Pointers and
  3308. * the Base and Length of the Rx Descriptor Ring
  3309. */
  3310. for (i = 0; i < adapter->num_rx_queues; i++) {
  3311. rx_ring = adapter->rx_ring[i];
  3312. if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
  3313. set_ring_rsc_enabled(rx_ring);
  3314. else
  3315. clear_ring_rsc_enabled(rx_ring);
  3316. }
  3317. }
  3318. static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
  3319. {
  3320. struct ixgbe_hw *hw = &adapter->hw;
  3321. u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
  3322. switch (hw->mac.type) {
  3323. case ixgbe_mac_82598EB:
  3324. /*
  3325. * For VMDq support of different descriptor types or
  3326. * buffer sizes through the use of multiple SRRCTL
  3327. * registers, RDRXCTL.MVMEN must be set to 1
  3328. *
  3329. * also, the manual doesn't mention it clearly but DCA hints
  3330. * will only use queue 0's tags unless this bit is set. Side
  3331. * effects of setting this bit are only that SRRCTL must be
  3332. * fully programmed [0..15]
  3333. */
  3334. rdrxctl |= IXGBE_RDRXCTL_MVMEN;
  3335. break;
  3336. case ixgbe_mac_X550:
  3337. case ixgbe_mac_X550EM_x:
  3338. case ixgbe_mac_x550em_a:
  3339. if (adapter->num_vfs)
  3340. rdrxctl |= IXGBE_RDRXCTL_PSP;
  3341. /* fall through for older HW */
  3342. case ixgbe_mac_82599EB:
  3343. case ixgbe_mac_X540:
  3344. /* Disable RSC for ACK packets */
  3345. IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
  3346. (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
  3347. rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
  3348. /* hardware requires some bits to be set by default */
  3349. rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX);
  3350. rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
  3351. break;
  3352. default:
  3353. /* We should do nothing since we don't know this hardware */
  3354. return;
  3355. }
  3356. IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
  3357. }
  3358. /**
  3359. * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
  3360. * @adapter: board private structure
  3361. *
  3362. * Configure the Rx unit of the MAC after a reset.
  3363. **/
  3364. static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
  3365. {
  3366. struct ixgbe_hw *hw = &adapter->hw;
  3367. int i;
  3368. u32 rxctrl, rfctl;
  3369. /* disable receives while setting up the descriptors */
  3370. hw->mac.ops.disable_rx(hw);
  3371. ixgbe_setup_psrtype(adapter);
  3372. ixgbe_setup_rdrxctl(adapter);
  3373. /* RSC Setup */
  3374. rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
  3375. rfctl &= ~IXGBE_RFCTL_RSC_DIS;
  3376. if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
  3377. rfctl |= IXGBE_RFCTL_RSC_DIS;
  3378. /* disable NFS filtering */
  3379. rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS);
  3380. IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
  3381. /* Program registers for the distribution of queues */
  3382. ixgbe_setup_mrqc(adapter);
  3383. /* set_rx_buffer_len must be called before ring initialization */
  3384. ixgbe_set_rx_buffer_len(adapter);
  3385. /*
  3386. * Setup the HW Rx Head and Tail Descriptor Pointers and
  3387. * the Base and Length of the Rx Descriptor Ring
  3388. */
  3389. for (i = 0; i < adapter->num_rx_queues; i++)
  3390. ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
  3391. rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
  3392. /* disable drop enable for 82598 parts */
  3393. if (hw->mac.type == ixgbe_mac_82598EB)
  3394. rxctrl |= IXGBE_RXCTRL_DMBYPS;
  3395. /* enable all receives */
  3396. rxctrl |= IXGBE_RXCTRL_RXEN;
  3397. hw->mac.ops.enable_rx_dma(hw, rxctrl);
  3398. }
  3399. static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
  3400. __be16 proto, u16 vid)
  3401. {
  3402. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3403. struct ixgbe_hw *hw = &adapter->hw;
  3404. /* add VID to filter table */
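/* In VLAN promiscuous mode the entire VFTA is already set, so
 * only VID 0 (untagged traffic) still needs to be programmed
 */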
  3405. if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, !!vid);
  3407. set_bit(vid, adapter->active_vlans);
  3408. return 0;
  3409. }
  3410. static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
  3411. {
  3412. u32 vlvf;
  3413. int idx;
/* shortcut the special case */
  3415. if (vlan == 0)
  3416. return 0;
  3417. /* Search for the vlan id in the VLVF entries */
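/* the loop below never tests entry 0, so a return of 0 doubles
 * as "not found" and is treated like the VLAN 0 shortcut above
 */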
  3418. for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
  3419. vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
  3420. if ((vlvf & VLAN_VID_MASK) == vlan)
  3421. break;
  3422. }
  3423. return idx;
  3424. }
  3425. void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
  3426. {
  3427. struct ixgbe_hw *hw = &adapter->hw;
  3428. u32 bits, word;
  3429. int idx;
  3430. idx = ixgbe_find_vlvf_entry(hw, vid);
  3431. if (!idx)
  3432. return;
/* See if any pools other than the PF are enabled in this
 * VLAN filter entry.
 */
  3436. word = idx * 2 + (VMDQ_P(0) / 32);
  3437. bits = ~BIT(VMDQ_P(0) % 32);
  3438. bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
  3439. /* Disable the filter so this falls into the default pool. */
  3440. if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
  3441. if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
  3442. IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
  3443. IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
  3444. }
  3445. }
  3446. static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
  3447. __be16 proto, u16 vid)
  3448. {
  3449. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3450. struct ixgbe_hw *hw = &adapter->hw;
  3451. /* remove VID from filter table */
  3452. if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
  3453. hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
  3454. clear_bit(vid, adapter->active_vlans);
  3455. return 0;
  3456. }
  3457. /**
  3458. * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
  3459. * @adapter: driver data
  3460. */
  3461. static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
  3462. {
  3463. struct ixgbe_hw *hw = &adapter->hw;
  3464. u32 vlnctrl;
  3465. int i, j;
  3466. switch (hw->mac.type) {
  3467. case ixgbe_mac_82598EB:
  3468. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3469. vlnctrl &= ~IXGBE_VLNCTRL_VME;
  3470. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3471. break;
  3472. case ixgbe_mac_82599EB:
  3473. case ixgbe_mac_X540:
  3474. case ixgbe_mac_X550:
  3475. case ixgbe_mac_X550EM_x:
  3476. case ixgbe_mac_x550em_a:
  3477. for (i = 0; i < adapter->num_rx_queues; i++) {
  3478. struct ixgbe_ring *ring = adapter->rx_ring[i];
  3479. if (ring->l2_accel_priv)
  3480. continue;
  3481. j = ring->reg_idx;
  3482. vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
  3483. vlnctrl &= ~IXGBE_RXDCTL_VME;
  3484. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
  3485. }
  3486. break;
  3487. default:
  3488. break;
  3489. }
  3490. }
  3491. /**
  3492. * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping
  3493. * @adapter: driver data
  3494. */
  3495. static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
  3496. {
  3497. struct ixgbe_hw *hw = &adapter->hw;
  3498. u32 vlnctrl;
  3499. int i, j;
  3500. switch (hw->mac.type) {
  3501. case ixgbe_mac_82598EB:
  3502. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3503. vlnctrl |= IXGBE_VLNCTRL_VME;
  3504. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3505. break;
  3506. case ixgbe_mac_82599EB:
  3507. case ixgbe_mac_X540:
  3508. case ixgbe_mac_X550:
  3509. case ixgbe_mac_X550EM_x:
  3510. case ixgbe_mac_x550em_a:
  3511. for (i = 0; i < adapter->num_rx_queues; i++) {
  3512. struct ixgbe_ring *ring = adapter->rx_ring[i];
  3513. if (ring->l2_accel_priv)
  3514. continue;
  3515. j = ring->reg_idx;
  3516. vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
  3517. vlnctrl |= IXGBE_RXDCTL_VME;
  3518. IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
  3519. }
  3520. break;
  3521. default:
  3522. break;
  3523. }
  3524. }
  3525. static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
  3526. {
  3527. struct ixgbe_hw *hw = &adapter->hw;
  3528. u32 vlnctrl, i;
  3529. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3530. if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
  3531. /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
  3532. vlnctrl |= IXGBE_VLNCTRL_VFE;
  3533. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3534. } else {
  3535. vlnctrl &= ~IXGBE_VLNCTRL_VFE;
  3536. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3537. return;
  3538. }
  3539. /* Nothing to do for 82598 */
  3540. if (hw->mac.type == ixgbe_mac_82598EB)
  3541. return;
  3542. /* We are already in VLAN promisc, nothing to do */
  3543. if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
  3544. return;
  3545. /* Set flag so we don't redo unnecessary work */
  3546. adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
  3547. /* Add PF to all active pools */
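/* each VLVF entry owns two 32-bit pool-enable words in VLVFB,
 * hence the (i * 2 + pool / 32) register indexing below
 */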
  3548. for (i = IXGBE_VLVF_ENTRIES; --i;) {
  3549. u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
  3550. u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
  3551. vlvfb |= BIT(VMDQ_P(0) % 32);
  3552. IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
  3553. }
  3554. /* Set all bits in the VLAN filter table array */
  3555. for (i = hw->mac.vft_size; i--;)
  3556. IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
  3557. }
  3558. #define VFTA_BLOCK_SIZE 8
  3559. static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
  3560. {
  3561. struct ixgbe_hw *hw = &adapter->hw;
  3562. u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
  3563. u32 vid_start = vfta_offset * 32;
  3564. u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
  3565. u32 i, vid, word, bits;
  3566. for (i = IXGBE_VLVF_ENTRIES; --i;) {
  3567. u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
  3568. /* pull VLAN ID from VLVF */
  3569. vid = vlvf & VLAN_VID_MASK;
/* only concern ourselves with a certain range */
  3571. if (vid < vid_start || vid >= vid_end)
  3572. continue;
  3573. if (vlvf) {
  3574. /* record VLAN ID in VFTA */
  3575. vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
  3576. /* if PF is part of this then continue */
  3577. if (test_bit(vid, adapter->active_vlans))
  3578. continue;
  3579. }
  3580. /* remove PF from the pool */
  3581. word = i * 2 + VMDQ_P(0) / 32;
  3582. bits = ~BIT(VMDQ_P(0) % 32);
  3583. bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
  3584. IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
  3585. }
  3586. /* extract values from active_vlans and write back to VFTA */
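/* active_vlans is a bitmap of longs while VFTA registers are
 * 32 bits wide, so on 64-bit kernels each long spans two VFTA
 * words; the BITS_PER_LONG math selects the right 32-bit slice
 */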
  3587. for (i = VFTA_BLOCK_SIZE; i--;) {
  3588. vid = (vfta_offset + i) * 32;
  3589. word = vid / BITS_PER_LONG;
  3590. bits = vid % BITS_PER_LONG;
  3591. vfta[i] |= adapter->active_vlans[word] >> bits;
  3592. IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
  3593. }
  3594. }
  3595. static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
  3596. {
  3597. struct ixgbe_hw *hw = &adapter->hw;
  3598. u32 vlnctrl, i;
  3599. /* Set VLAN filtering to enabled */
  3600. vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
  3601. vlnctrl |= IXGBE_VLNCTRL_VFE;
  3602. IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
  3603. if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ||
  3604. hw->mac.type == ixgbe_mac_82598EB)
  3605. return;
  3606. /* We are not in VLAN promisc, nothing to do */
  3607. if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
  3608. return;
  3609. /* Set flag so we don't redo unnecessary work */
  3610. adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
  3611. for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
  3612. ixgbe_scrub_vfta(adapter, i);
  3613. }
  3614. static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
  3615. {
  3616. u16 vid = 1;
  3617. ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
  3618. for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
  3619. ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
  3620. }
  3621. /**
  3622. * ixgbe_write_mc_addr_list - write multicast addresses to MTA
  3623. * @netdev: network interface device structure
  3624. *
  3625. * Writes multicast address list to the MTA hash table.
  3626. * Returns: -ENOMEM on failure
  3627. * 0 on no addresses written
  3628. * X on writing X addresses to MTA
  3629. **/
  3630. static int ixgbe_write_mc_addr_list(struct net_device *netdev)
  3631. {
  3632. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3633. struct ixgbe_hw *hw = &adapter->hw;
  3634. if (!netif_running(netdev))
  3635. return 0;
  3636. if (hw->mac.ops.update_mc_addr_list)
  3637. hw->mac.ops.update_mc_addr_list(hw, netdev);
  3638. else
  3639. return -ENOMEM;
  3640. #ifdef CONFIG_PCI_IOV
  3641. ixgbe_restore_vf_multicasts(adapter);
  3642. #endif
  3643. return netdev_mc_count(netdev);
  3644. }
  3645. #ifdef CONFIG_PCI_IOV
  3646. void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
  3647. {
  3648. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  3649. struct ixgbe_hw *hw = &adapter->hw;
  3650. int i;
  3651. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  3652. mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
  3653. if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
  3654. hw->mac.ops.set_rar(hw, i,
  3655. mac_table->addr,
  3656. mac_table->pool,
  3657. IXGBE_RAH_AV);
  3658. else
  3659. hw->mac.ops.clear_rar(hw, i);
  3660. }
  3661. }
  3662. #endif
  3663. static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
  3664. {
  3665. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  3666. struct ixgbe_hw *hw = &adapter->hw;
  3667. int i;
  3668. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  3669. if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
  3670. continue;
  3671. mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
  3672. if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
  3673. hw->mac.ops.set_rar(hw, i,
  3674. mac_table->addr,
  3675. mac_table->pool,
  3676. IXGBE_RAH_AV);
  3677. else
  3678. hw->mac.ops.clear_rar(hw, i);
  3679. }
  3680. }
  3681. static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
  3682. {
  3683. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  3684. struct ixgbe_hw *hw = &adapter->hw;
  3685. int i;
  3686. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  3687. mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
  3688. mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
  3689. }
  3690. ixgbe_sync_mac_table(adapter);
  3691. }
  3692. static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
  3693. {
  3694. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  3695. struct ixgbe_hw *hw = &adapter->hw;
  3696. int i, count = 0;
  3697. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  3698. /* do not count default RAR as available */
  3699. if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
  3700. continue;
/* count entries that are unused or already belong to this pool */
  3702. if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
  3703. if (mac_table->pool != pool)
  3704. continue;
  3705. }
  3706. count++;
  3707. }
  3708. return count;
  3709. }
/* this function overwrites RAR 0, claiming it as the default filter */
  3711. static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
  3712. {
  3713. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  3714. struct ixgbe_hw *hw = &adapter->hw;
  3715. memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
  3716. mac_table->pool = VMDQ_P(0);
  3717. mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
  3718. hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
  3719. IXGBE_RAH_AV);
  3720. }
  3721. int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
  3722. const u8 *addr, u16 pool)
  3723. {
  3724. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  3725. struct ixgbe_hw *hw = &adapter->hw;
  3726. int i;
  3727. if (is_zero_ether_addr(addr))
  3728. return -EINVAL;
  3729. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  3730. if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
  3731. continue;
  3732. ether_addr_copy(mac_table->addr, addr);
  3733. mac_table->pool = pool;
  3734. mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
  3735. IXGBE_MAC_STATE_IN_USE;
  3736. ixgbe_sync_mac_table(adapter);
  3737. return i;
  3738. }
  3739. return -ENOMEM;
  3740. }
  3741. int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
  3742. const u8 *addr, u16 pool)
  3743. {
  3744. struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
  3745. struct ixgbe_hw *hw = &adapter->hw;
  3746. int i;
  3747. if (is_zero_ether_addr(addr))
  3748. return -EINVAL;
  3749. /* search table for addr, if found clear IN_USE flag and sync */
  3750. for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
  3751. /* we can only delete an entry if it is in use */
  3752. if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
  3753. continue;
  3754. /* we only care about entries that belong to the given pool */
  3755. if (mac_table->pool != pool)
  3756. continue;
  3757. /* we only care about a specific MAC address */
  3758. if (!ether_addr_equal(addr, mac_table->addr))
  3759. continue;
  3760. mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
  3761. mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
  3762. ixgbe_sync_mac_table(adapter);
  3763. return 0;
  3764. }
  3765. return -ENOMEM;
  3766. }
  3767. /**
  3768. * ixgbe_write_uc_addr_list - write unicast addresses to RAR table
  3769. * @netdev: network interface device structure
  3770. *
  3771. * Writes unicast address list to the RAR table.
  3772. * Returns: -ENOMEM on failure/insufficient address space
  3773. * 0 on no addresses written
  3774. * X on writing X addresses to the RAR table
  3775. **/
  3776. static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
  3777. {
  3778. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3779. int count = 0;
/* return -ENOMEM when there is not enough RAR space for every address */
  3781. if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
  3782. return -ENOMEM;
  3783. if (!netdev_uc_empty(netdev)) {
  3784. struct netdev_hw_addr *ha;
  3785. netdev_for_each_uc_addr(ha, netdev) {
  3786. ixgbe_del_mac_filter(adapter, ha->addr, vfn);
  3787. ixgbe_add_mac_filter(adapter, ha->addr, vfn);
  3788. count++;
  3789. }
  3790. }
  3791. return count;
  3792. }
  3793. static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
  3794. {
  3795. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3796. int ret;
  3797. ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
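/* a successful add returns the RAR index (>= 0); fold that to 0
 * for __dev_uc_sync() and let real errors pass through
 */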
  3798. return min_t(int, ret, 0);
  3799. }
  3800. static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
  3801. {
  3802. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3803. ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
  3804. return 0;
  3805. }
  3806. /**
  3807. * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
  3808. * @netdev: network interface device structure
  3809. *
* The set_rx_mode entry point is called whenever the unicast/multicast
  3811. * address list or the network interface flags are updated. This routine is
  3812. * responsible for configuring the hardware for proper unicast, multicast and
  3813. * promiscuous mode.
  3814. **/
  3815. void ixgbe_set_rx_mode(struct net_device *netdev)
  3816. {
  3817. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  3818. struct ixgbe_hw *hw = &adapter->hw;
  3819. u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
  3820. netdev_features_t features = netdev->features;
  3821. int count;
  3822. /* Check for Promiscuous and All Multicast modes */
  3823. fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
  3824. /* set all bits that we expect to always be set */
  3825. fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
  3826. fctrl |= IXGBE_FCTRL_BAM;
  3827. fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
  3828. fctrl |= IXGBE_FCTRL_PMCF;
  3829. /* clear the bits we are changing the status of */
  3830. fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
  3831. if (netdev->flags & IFF_PROMISC) {
  3832. hw->addr_ctrl.user_set_promisc = true;
  3833. fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
  3834. vmolr |= IXGBE_VMOLR_MPE;
  3835. features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
  3836. } else {
  3837. if (netdev->flags & IFF_ALLMULTI) {
  3838. fctrl |= IXGBE_FCTRL_MPE;
  3839. vmolr |= IXGBE_VMOLR_MPE;
  3840. }
  3841. hw->addr_ctrl.user_set_promisc = false;
  3842. }
  3843. /*
  3844. * Write addresses to available RAR registers, if there is not
  3845. * sufficient space to store all the addresses then enable
  3846. * unicast promiscuous mode
  3847. */
  3848. if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
  3849. fctrl |= IXGBE_FCTRL_UPE;
  3850. vmolr |= IXGBE_VMOLR_ROPE;
  3851. }
  3852. /* Write addresses to the MTA, if the attempt fails
  3853. * then we should just turn on promiscuous mode so
  3854. * that we can at least receive multicast traffic
  3855. */
  3856. count = ixgbe_write_mc_addr_list(netdev);
  3857. if (count < 0) {
  3858. fctrl |= IXGBE_FCTRL_MPE;
  3859. vmolr |= IXGBE_VMOLR_MPE;
  3860. } else if (count) {
  3861. vmolr |= IXGBE_VMOLR_ROMPE;
  3862. }
  3863. if (hw->mac.type != ixgbe_mac_82598EB) {
  3864. vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
  3865. ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
  3866. IXGBE_VMOLR_ROPE);
  3867. IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
  3868. }
  3869. /* This is useful for sniffing bad packets. */
  3870. if (features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by the normal PROMISC logic
 * earlier in this function */
  3873. fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
  3874. IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */
  3875. IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */
  3876. fctrl &= ~(IXGBE_FCTRL_DPF);
  3877. /* NOTE: VLAN filtering is disabled by setting PROMISC */
  3878. }
  3879. IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
  3880. if (features & NETIF_F_HW_VLAN_CTAG_RX)
  3881. ixgbe_vlan_strip_enable(adapter);
  3882. else
  3883. ixgbe_vlan_strip_disable(adapter);
  3884. if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
  3885. ixgbe_vlan_promisc_disable(adapter);
  3886. else
  3887. ixgbe_vlan_promisc_enable(adapter);
  3888. }
  3889. static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
  3890. {
  3891. int q_idx;
  3892. for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
  3893. ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
  3894. napi_enable(&adapter->q_vector[q_idx]->napi);
  3895. }
  3896. }
  3897. static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
  3898. {
  3899. int q_idx;
  3900. for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
  3901. napi_disable(&adapter->q_vector[q_idx]->napi);
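/* wait for any busy-poll user to release the q_vector lock so
 * nobody keeps polling a vector we are tearing down
 */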
  3902. while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
  3903. pr_info("QV %d locked\n", q_idx);
  3904. usleep_range(1000, 20000);
  3905. }
  3906. }
  3907. }
  3908. static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
  3909. {
  3910. struct ixgbe_hw *hw = &adapter->hw;
  3911. u32 vxlanctrl;
  3912. if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
  3913. IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
  3914. return;
vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
  3916. IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
  3917. if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
  3918. adapter->vxlan_port = 0;
  3919. if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
  3920. adapter->geneve_port = 0;
  3921. }
  3922. #ifdef CONFIG_IXGBE_DCB
  3923. /**
  3924. * ixgbe_configure_dcb - Configure DCB hardware
  3925. * @adapter: ixgbe adapter struct
  3926. *
  3927. * This is called by the driver on open to configure the DCB hardware.
  3928. * This is also called by the gennetlink interface when reconfiguring
  3929. * the DCB state.
  3930. */
  3931. static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
  3932. {
  3933. struct ixgbe_hw *hw = &adapter->hw;
  3934. int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
  3935. if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
  3936. if (hw->mac.type == ixgbe_mac_82598EB)
  3937. netif_set_gso_max_size(adapter->netdev, 65536);
  3938. return;
  3939. }
  3940. if (hw->mac.type == ixgbe_mac_82598EB)
  3941. netif_set_gso_max_size(adapter->netdev, 32768);
  3942. #ifdef IXGBE_FCOE
  3943. if (adapter->netdev->features & NETIF_F_FCOE_MTU)
  3944. max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
  3945. #endif
  3946. /* reconfigure the hardware */
  3947. if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
  3948. ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
  3949. DCB_TX_CONFIG);
  3950. ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
  3951. DCB_RX_CONFIG);
  3952. ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
  3953. } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) {
  3954. ixgbe_dcb_hw_ets(&adapter->hw,
  3955. adapter->ixgbe_ieee_ets,
  3956. max_frame);
  3957. ixgbe_dcb_hw_pfc_config(&adapter->hw,
  3958. adapter->ixgbe_ieee_pfc->pfc_en,
  3959. adapter->ixgbe_ieee_ets->prio_tc);
  3960. }
  3961. /* Enable RSS Hash per TC */
  3962. if (hw->mac.type != ixgbe_mac_82598EB) {
  3963. u32 msb = 0;
  3964. u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
  3965. while (rss_i) {
  3966. msb++;
  3967. rss_i >>= 1;
  3968. }
  3969. /* write msb to all 8 TCs in one write */
  3970. IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
  3971. }
  3972. }
  3973. #endif
  3974. /* Additional bittime to account for IXGBE framing */
  3975. #define IXGBE_ETH_FRAMING 20
  3976. /**
  3977. * ixgbe_hpbthresh - calculate high water mark for flow control
  3978. *
  3979. * @adapter: board private structure to calculate for
  3980. * @pb: packet buffer to calculate
  3981. */
  3982. static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
  3983. {
  3984. struct ixgbe_hw *hw = &adapter->hw;
  3985. struct net_device *dev = adapter->netdev;
  3986. int link, tc, kb, marker;
  3987. u32 dv_id, rx_pba;
  3988. /* Calculate max LAN frame size */
  3989. tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING;
  3990. #ifdef IXGBE_FCOE
  3991. /* FCoE traffic class uses FCOE jumbo frames */
  3992. if ((dev->features & NETIF_F_FCOE_MTU) &&
  3993. (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
  3994. (pb == ixgbe_fcoe_get_tc(adapter)))
  3995. tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
  3996. #endif
  3997. /* Calculate delay value for device */
  3998. switch (hw->mac.type) {
  3999. case ixgbe_mac_X540:
  4000. case ixgbe_mac_X550:
  4001. case ixgbe_mac_X550EM_x:
  4002. case ixgbe_mac_x550em_a:
  4003. dv_id = IXGBE_DV_X540(link, tc);
  4004. break;
  4005. default:
  4006. dv_id = IXGBE_DV(link, tc);
  4007. break;
  4008. }
  4009. /* Loopback switch introduces additional latency */
  4010. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  4011. dv_id += IXGBE_B2BT(tc);
/* Delay value is calculated in bit times; convert to KB */
  4013. kb = IXGBE_BT2KB(dv_id);
  4014. rx_pba = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(pb)) >> 10;
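/* the high water mark is whatever headroom remains in the
 * packet buffer after reserving the worst-case delay in KB
 */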
  4015. marker = rx_pba - kb;
/* It is possible that the packet buffer is not large enough
 * to provide the required headroom. In this case throw an
 * error to the user and do the best we can.
 */
  4020. if (marker < 0) {
e_warn(drv, "Packet Buffer(%i) cannot provide enough "
"headroom to support flow control. "
"Decrease MTU or number of traffic classes\n", pb);
  4024. marker = tc + 1;
  4025. }
  4026. return marker;
  4027. }
  4028. /**
* ixgbe_lpbthresh - calculate low water mark for flow control
  4030. *
  4031. * @adapter: board private structure to calculate for
  4032. * @pb: packet buffer to calculate
  4033. */
  4034. static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
  4035. {
  4036. struct ixgbe_hw *hw = &adapter->hw;
  4037. struct net_device *dev = adapter->netdev;
  4038. int tc;
  4039. u32 dv_id;
  4040. /* Calculate max LAN frame size */
  4041. tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
  4042. #ifdef IXGBE_FCOE
  4043. /* FCoE traffic class uses FCOE jumbo frames */
  4044. if ((dev->features & NETIF_F_FCOE_MTU) &&
  4045. (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
  4046. (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
  4047. tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
  4048. #endif
  4049. /* Calculate delay value for device */
  4050. switch (hw->mac.type) {
  4051. case ixgbe_mac_X540:
  4052. case ixgbe_mac_X550:
  4053. case ixgbe_mac_X550EM_x:
  4054. case ixgbe_mac_x550em_a:
  4055. dv_id = IXGBE_LOW_DV_X540(tc);
  4056. break;
  4057. default:
  4058. dv_id = IXGBE_LOW_DV(tc);
  4059. break;
  4060. }
/* Delay value is calculated in bit times; convert to KB */
  4062. return IXGBE_BT2KB(dv_id);
  4063. }
  4064. /*
* ixgbe_pbthresh_setup - calculate and set up the high and low water marks
  4066. */
  4067. static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
  4068. {
  4069. struct ixgbe_hw *hw = &adapter->hw;
  4070. int num_tc = netdev_get_num_tc(adapter->netdev);
  4071. int i;
  4072. if (!num_tc)
  4073. num_tc = 1;
  4074. for (i = 0; i < num_tc; i++) {
  4075. hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
  4076. hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
  4077. /* Low water marks must not be larger than high water marks */
  4078. if (hw->fc.low_water[i] > hw->fc.high_water[i])
  4079. hw->fc.low_water[i] = 0;
  4080. }
  4081. for (; i < MAX_TRAFFIC_CLASS; i++)
  4082. hw->fc.high_water[i] = 0;
  4083. }
  4084. static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
  4085. {
  4086. struct ixgbe_hw *hw = &adapter->hw;
  4087. int hdrm;
  4088. u8 tc = netdev_get_num_tc(adapter->netdev);
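/* fdir_pballoc encodes the flow director table size (64K/128K/
 * 256K as 1/2/3 in this driver), so the shift below yields the
 * headroom in KB that the FDIR tables consume
 */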
  4089. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
  4090. adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
  4091. hdrm = 32 << adapter->fdir_pballoc;
  4092. else
  4093. hdrm = 0;
  4094. hw->mac.ops.set_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
  4095. ixgbe_pbthresh_setup(adapter);
  4096. }
  4097. static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
  4098. {
  4099. struct ixgbe_hw *hw = &adapter->hw;
  4100. struct hlist_node *node2;
  4101. struct ixgbe_fdir_filter *filter;
  4102. spin_lock(&adapter->fdir_perfect_lock);
  4103. if (!hlist_empty(&adapter->fdir_filter_list))
  4104. ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
  4105. hlist_for_each_entry_safe(filter, node2,
  4106. &adapter->fdir_filter_list, fdir_node) {
  4107. ixgbe_fdir_write_perfect_filter_82599(hw,
  4108. &filter->filter,
  4109. filter->sw_idx,
  4110. (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
  4111. IXGBE_FDIR_DROP_QUEUE :
  4112. adapter->rx_ring[filter->action]->reg_idx);
  4113. }
  4114. spin_unlock(&adapter->fdir_perfect_lock);
  4115. }
  4116. static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
  4117. struct ixgbe_adapter *adapter)
  4118. {
  4119. struct ixgbe_hw *hw = &adapter->hw;
  4120. u32 vmolr;
  4121. /* No unicast promiscuous support for VMDQ devices. */
  4122. vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
  4123. vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
  4124. /* clear the affected bit */
  4125. vmolr &= ~IXGBE_VMOLR_MPE;
  4126. if (dev->flags & IFF_ALLMULTI) {
  4127. vmolr |= IXGBE_VMOLR_MPE;
  4128. } else {
  4129. vmolr |= IXGBE_VMOLR_ROMPE;
  4130. hw->mac.ops.update_mc_addr_list(hw, dev);
  4131. }
  4132. ixgbe_write_uc_addr_list(adapter->netdev, pool);
  4133. IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
  4134. }
  4135. static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
  4136. {
  4137. struct ixgbe_adapter *adapter = vadapter->real_adapter;
  4138. int rss_i = adapter->num_rx_queues_per_pool;
  4139. struct ixgbe_hw *hw = &adapter->hw;
  4140. u16 pool = vadapter->pool;
  4141. u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
  4142. IXGBE_PSRTYPE_UDPHDR |
  4143. IXGBE_PSRTYPE_IPV4HDR |
  4144. IXGBE_PSRTYPE_L2HDR |
  4145. IXGBE_PSRTYPE_IPV6HDR;
  4146. if (hw->mac.type == ixgbe_mac_82598EB)
  4147. return;
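/* bits 29:30 of PSRTYPE encode how many queue bits RSS may use
 * for this pool, mirroring the setup in ixgbe_setup_psrtype()
 */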
  4148. if (rss_i > 3)
  4149. psrtype |= 2u << 29;
  4150. else if (rss_i > 1)
  4151. psrtype |= 1u << 29;
  4152. IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
  4153. }
  4154. /**
  4155. * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
  4156. * @rx_ring: ring to free buffers from
  4157. **/
  4158. static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
  4159. {
  4160. struct device *dev = rx_ring->dev;
  4161. unsigned long size;
  4162. u16 i;
  4163. /* ring already cleared, nothing to do */
  4164. if (!rx_ring->rx_buffer_info)
  4165. return;
  4166. /* Free all the Rx ring sk_buffs */
  4167. for (i = 0; i < rx_ring->count; i++) {
  4168. struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
  4169. if (rx_buffer->skb) {
  4170. struct sk_buff *skb = rx_buffer->skb;
  4171. if (IXGBE_CB(skb)->page_released)
  4172. dma_unmap_page(dev,
  4173. IXGBE_CB(skb)->dma,
  4174. ixgbe_rx_bufsz(rx_ring),
  4175. DMA_FROM_DEVICE);
  4176. dev_kfree_skb(skb);
  4177. rx_buffer->skb = NULL;
  4178. }
  4179. if (!rx_buffer->page)
  4180. continue;
  4181. dma_unmap_page(dev, rx_buffer->dma,
  4182. ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
  4183. __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
  4184. rx_buffer->page = NULL;
  4185. }
  4186. size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
  4187. memset(rx_ring->rx_buffer_info, 0, size);
  4188. /* Zero out the descriptor ring */
  4189. memset(rx_ring->desc, 0, rx_ring->size);
  4190. rx_ring->next_to_alloc = 0;
  4191. rx_ring->next_to_clean = 0;
  4192. rx_ring->next_to_use = 0;
  4193. }
  4194. static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
  4195. struct ixgbe_ring *rx_ring)
  4196. {
  4197. struct ixgbe_adapter *adapter = vadapter->real_adapter;
  4198. int index = rx_ring->queue_index + vadapter->rx_base_queue;
  4199. /* shutdown specific queue receive and wait for dma to settle */
  4200. ixgbe_disable_rx_queue(adapter, rx_ring);
  4201. usleep_range(10000, 20000);
  4202. ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
  4203. ixgbe_clean_rx_ring(rx_ring);
  4204. rx_ring->l2_accel_priv = NULL;
  4205. }
  4206. static int ixgbe_fwd_ring_down(struct net_device *vdev,
  4207. struct ixgbe_fwd_adapter *accel)
  4208. {
  4209. struct ixgbe_adapter *adapter = accel->real_adapter;
  4210. unsigned int rxbase = accel->rx_base_queue;
  4211. unsigned int txbase = accel->tx_base_queue;
  4212. int i;
  4213. netif_tx_stop_all_queues(vdev);
  4214. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4215. ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
  4216. adapter->rx_ring[rxbase + i]->netdev = adapter->netdev;
  4217. }
  4218. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4219. adapter->tx_ring[txbase + i]->l2_accel_priv = NULL;
  4220. adapter->tx_ring[txbase + i]->netdev = adapter->netdev;
  4221. }
  4222. return 0;
  4223. }
  4224. static int ixgbe_fwd_ring_up(struct net_device *vdev,
  4225. struct ixgbe_fwd_adapter *accel)
  4226. {
  4227. struct ixgbe_adapter *adapter = accel->real_adapter;
  4228. unsigned int rxbase, txbase, queues;
  4229. int i, baseq, err = 0;
  4230. if (!test_bit(accel->pool, &adapter->fwd_bitmask))
  4231. return 0;
  4232. baseq = accel->pool * adapter->num_rx_queues_per_pool;
  4233. netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
  4234. accel->pool, adapter->num_rx_pools,
  4235. baseq, baseq + adapter->num_rx_queues_per_pool,
  4236. adapter->fwd_bitmask);
  4237. accel->netdev = vdev;
  4238. accel->rx_base_queue = rxbase = baseq;
  4239. accel->tx_base_queue = txbase = baseq;
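/* Rx and Tx queues of a pool are carved out of one contiguous
 * block, so both base queues start at baseq
 */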
  4240. for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
  4241. ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]);
  4242. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4243. adapter->rx_ring[rxbase + i]->netdev = vdev;
  4244. adapter->rx_ring[rxbase + i]->l2_accel_priv = accel;
  4245. ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]);
  4246. }
  4247. for (i = 0; i < adapter->num_rx_queues_per_pool; i++) {
  4248. adapter->tx_ring[txbase + i]->netdev = vdev;
  4249. adapter->tx_ring[txbase + i]->l2_accel_priv = accel;
  4250. }
  4251. queues = min_t(unsigned int,
  4252. adapter->num_rx_queues_per_pool, vdev->num_tx_queues);
  4253. err = netif_set_real_num_tx_queues(vdev, queues);
  4254. if (err)
  4255. goto fwd_queue_err;
  4256. err = netif_set_real_num_rx_queues(vdev, queues);
  4257. if (err)
  4258. goto fwd_queue_err;
  4259. if (is_valid_ether_addr(vdev->dev_addr))
  4260. ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool);
  4261. ixgbe_fwd_psrtype(accel);
  4262. ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
  4263. return err;
  4264. fwd_queue_err:
  4265. ixgbe_fwd_ring_down(vdev, accel);
  4266. return err;
  4267. }
  4268. static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter)
  4269. {
  4270. struct net_device *upper;
  4271. struct list_head *iter;
  4272. int err;
  4273. netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
  4274. if (netif_is_macvlan(upper)) {
  4275. struct macvlan_dev *dfwd = netdev_priv(upper);
  4276. struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
  4277. if (dfwd->fwd_priv) {
  4278. err = ixgbe_fwd_ring_up(upper, vadapter);
  4279. if (err)
  4280. continue;
  4281. }
  4282. }
  4283. }
  4284. }
  4285. static void ixgbe_configure(struct ixgbe_adapter *adapter)
  4286. {
  4287. struct ixgbe_hw *hw = &adapter->hw;
  4288. ixgbe_configure_pb(adapter);
  4289. #ifdef CONFIG_IXGBE_DCB
  4290. ixgbe_configure_dcb(adapter);
  4291. #endif
  4292. /*
  4293. * We must restore virtualization before VLANs or else
  4294. * the VLVF registers will not be populated
  4295. */
  4296. ixgbe_configure_virtualization(adapter);
  4297. ixgbe_set_rx_mode(adapter->netdev);
  4298. ixgbe_restore_vlan(adapter);
  4299. switch (hw->mac.type) {
  4300. case ixgbe_mac_82599EB:
  4301. case ixgbe_mac_X540:
  4302. hw->mac.ops.disable_rx_buff(hw);
  4303. break;
  4304. default:
  4305. break;
  4306. }
  4307. if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
  4308. ixgbe_init_fdir_signature_82599(&adapter->hw,
  4309. adapter->fdir_pballoc);
  4310. } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
  4311. ixgbe_init_fdir_perfect_82599(&adapter->hw,
  4312. adapter->fdir_pballoc);
  4313. ixgbe_fdir_filter_restore(adapter);
  4314. }
  4315. switch (hw->mac.type) {
  4316. case ixgbe_mac_82599EB:
  4317. case ixgbe_mac_X540:
  4318. hw->mac.ops.enable_rx_buff(hw);
  4319. break;
  4320. default:
  4321. break;
  4322. }
  4323. #ifdef CONFIG_IXGBE_DCA
  4324. /* configure DCA */
  4325. if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE)
  4326. ixgbe_setup_dca(adapter);
  4327. #endif /* CONFIG_IXGBE_DCA */
  4328. #ifdef IXGBE_FCOE
  4329. /* configure FCoE L2 filters, redirection table, and Rx control */
  4330. ixgbe_configure_fcoe(adapter);
  4331. #endif /* IXGBE_FCOE */
  4332. ixgbe_configure_tx(adapter);
  4333. ixgbe_configure_rx(adapter);
  4334. ixgbe_configure_dfwd(adapter);
  4335. }
  4336. /**
  4337. * ixgbe_sfp_link_config - set up SFP+ link
  4338. * @adapter: pointer to private adapter struct
  4339. **/
  4340. static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
  4341. {
  4342. /*
  4343. * We are assuming the worst case scenario here, and that
  4344. * is that an SFP was inserted/removed after the reset
  4345. * but before SFP detection was enabled. As such the best
  4346. * solution is to just start searching as soon as we start
  4347. */
  4348. if (adapter->hw.mac.type == ixgbe_mac_82598EB)
  4349. adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
  4350. adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
  4351. adapter->sfp_poll_time = 0;
  4352. }
  4353. /**
  4354. * ixgbe_non_sfp_link_config - set up non-SFP+ link
  4355. * @hw: pointer to private hardware struct
  4356. *
  4357. * Returns 0 on success, negative on failure
  4358. **/
  4359. static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
  4360. {
  4361. u32 speed;
  4362. bool autoneg, link_up = false;
  4363. int ret = IXGBE_ERR_LINK_SETUP;
  4364. if (hw->mac.ops.check_link)
  4365. ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
  4366. if (ret)
  4367. return ret;
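/* prefer the user-advertised speeds; if none were configured,
 * fall back to whatever the hardware reports it can do
 */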
  4368. speed = hw->phy.autoneg_advertised;
  4369. if ((!speed) && (hw->mac.ops.get_link_capabilities))
  4370. ret = hw->mac.ops.get_link_capabilities(hw, &speed,
  4371. &autoneg);
  4372. if (ret)
  4373. return ret;
  4374. if (hw->mac.ops.setup_link)
  4375. ret = hw->mac.ops.setup_link(hw, speed, link_up);
  4376. return ret;
  4377. }
  4378. static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
  4379. {
  4380. struct ixgbe_hw *hw = &adapter->hw;
  4381. u32 gpie = 0;
  4382. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
  4383. gpie = IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
  4384. IXGBE_GPIE_OCD;
  4385. gpie |= IXGBE_GPIE_EIAME;
  4386. /*
  4387. * use EIAM to auto-mask when MSI-X interrupt is asserted
  4388. * this saves a register write for every interrupt
  4389. */
  4390. switch (hw->mac.type) {
  4391. case ixgbe_mac_82598EB:
  4392. IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
  4393. break;
  4394. case ixgbe_mac_82599EB:
  4395. case ixgbe_mac_X540:
  4396. case ixgbe_mac_X550:
  4397. case ixgbe_mac_X550EM_x:
  4398. case ixgbe_mac_x550em_a:
  4399. default:
  4400. IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
  4401. IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
  4402. break;
  4403. }
  4404. } else {
  4405. /* legacy interrupts, use EIAM to auto-mask when reading EICR,
  4406. * specifically only auto mask tx and rx interrupts */
  4407. IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
  4408. }
  4409. /* XXX: to interrupt immediately for EICS writes, enable this */
  4410. /* gpie |= IXGBE_GPIE_EIMEN; */
  4411. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
  4412. gpie &= ~IXGBE_GPIE_VTMODE_MASK;
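/* VT mode selects how the queues are partitioned among pools:
 * 8 queues/pool -> 16 pools, 4 -> 32 pools, otherwise 64 pools
 */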
  4413. switch (adapter->ring_feature[RING_F_VMDQ].mask) {
  4414. case IXGBE_82599_VMDQ_8Q_MASK:
  4415. gpie |= IXGBE_GPIE_VTMODE_16;
  4416. break;
  4417. case IXGBE_82599_VMDQ_4Q_MASK:
  4418. gpie |= IXGBE_GPIE_VTMODE_32;
  4419. break;
  4420. default:
  4421. gpie |= IXGBE_GPIE_VTMODE_64;
  4422. break;
  4423. }
  4424. }
/* Enable the thermal overheat sensor interrupt */
  4426. if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) {
  4427. switch (adapter->hw.mac.type) {
  4428. case ixgbe_mac_82599EB:
  4429. gpie |= IXGBE_SDP0_GPIEN_8259X;
  4430. break;
  4431. default:
  4432. break;
  4433. }
  4434. }
  4435. /* Enable fan failure interrupt */
  4436. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
  4437. gpie |= IXGBE_SDP1_GPIEN(hw);
  4438. switch (hw->mac.type) {
  4439. case ixgbe_mac_82599EB:
  4440. gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
  4441. break;
  4442. case ixgbe_mac_X550EM_x:
  4443. case ixgbe_mac_x550em_a:
  4444. gpie |= IXGBE_SDP0_GPIEN_X540;
  4445. break;
  4446. default:
  4447. break;
  4448. }
  4449. IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
  4450. }
  4451. static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
  4452. {
  4453. struct ixgbe_hw *hw = &adapter->hw;
  4454. int err;
  4455. u32 ctrl_ext;
  4456. ixgbe_get_hw_control(adapter);
  4457. ixgbe_setup_gpie(adapter);
  4458. if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
  4459. ixgbe_configure_msix(adapter);
  4460. else
  4461. ixgbe_configure_msi_and_legacy(adapter);
  4462. /* enable the optics for 82599 SFP+ fiber */
  4463. if (hw->mac.ops.enable_tx_laser)
  4464. hw->mac.ops.enable_tx_laser(hw);
  4465. if (hw->phy.ops.set_phy_power)
  4466. hw->phy.ops.set_phy_power(hw, true);
  4467. smp_mb__before_atomic();
  4468. clear_bit(__IXGBE_DOWN, &adapter->state);
  4469. ixgbe_napi_enable_all(adapter);
  4470. if (ixgbe_is_sfp(hw)) {
  4471. ixgbe_sfp_link_config(adapter);
  4472. } else {
  4473. err = ixgbe_non_sfp_link_config(hw);
  4474. if (err)
  4475. e_err(probe, "link_config FAILED %d\n", err);
  4476. }
  4477. /* clear any pending interrupts, may auto mask */
  4478. IXGBE_READ_REG(hw, IXGBE_EICR);
  4479. ixgbe_irq_enable(adapter, true, true);
  4480. /*
  4481. * If this adapter has a fan, check to see if we had a failure
  4482. * before we enabled the interrupt.
  4483. */
  4484. if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
  4485. u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
  4486. if (esdp & IXGBE_ESDP_SDP1)
  4487. e_crit(drv, "Fan has stopped, replace the adapter\n");
  4488. }
  4489. /* bring the link up in the watchdog, this could race with our first
  4490. * link up interrupt but shouldn't be a problem */
  4491. adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
  4492. adapter->link_check_timeout = jiffies;
  4493. mod_timer(&adapter->service_timer, jiffies);
  4494. /* Set PF Reset Done bit so PF/VF Mail Ops can work */
  4495. ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
  4496. ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
  4497. IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
  4498. }
  4499. void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
  4500. {
  4501. WARN_ON(in_interrupt());
  4502. /* put off any impending NetWatchDogTimeout */
  4503. netif_trans_update(adapter->netdev);
  4504. while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
  4505. usleep_range(1000, 2000);
  4506. ixgbe_down(adapter);
  4507. /*
  4508. * If SR-IOV enabled then wait a bit before bringing the adapter
  4509. * back up to give the VFs time to respond to the reset. The
  4510. * two second wait is based upon the watchdog timer cycle in
  4511. * the VF driver.
  4512. */
  4513. if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
  4514. msleep(2000);
  4515. ixgbe_up(adapter);
  4516. clear_bit(__IXGBE_RESETTING, &adapter->state);
  4517. }
  4518. void ixgbe_up(struct ixgbe_adapter *adapter)
  4519. {
  4520. /* hardware has been reset, we need to reload some things */
  4521. ixgbe_configure(adapter);
  4522. ixgbe_up_complete(adapter);
  4523. }
  4524. void ixgbe_reset(struct ixgbe_adapter *adapter)
  4525. {
  4526. struct ixgbe_hw *hw = &adapter->hw;
  4527. struct net_device *netdev = adapter->netdev;
  4528. int err;
  4529. if (ixgbe_removed(hw->hw_addr))
  4530. return;
  4531. /* lock SFP init bit to prevent race conditions with the watchdog */
  4532. while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
  4533. usleep_range(1000, 2000);
  4534. /* clear all SFP and link config related flags while holding SFP_INIT */
  4535. adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
  4536. IXGBE_FLAG2_SFP_NEEDS_RESET);
  4537. adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
  4538. err = hw->mac.ops.init_hw(hw);
  4539. switch (err) {
  4540. case 0:
  4541. case IXGBE_ERR_SFP_NOT_PRESENT:
  4542. case IXGBE_ERR_SFP_NOT_SUPPORTED:
  4543. break;
  4544. case IXGBE_ERR_MASTER_REQUESTS_PENDING:
  4545. e_dev_err("master disable timed out\n");
  4546. break;
  4547. case IXGBE_ERR_EEPROM_VERSION:
  4548. /* We are running on a pre-production device, log a warning */
  4549. e_dev_warn("This device is a pre-production adapter/LOM. "
  4550. "Please be aware there may be issues associated with "
  4551. "your hardware. If you are experiencing problems "
  4552. "please contact your Intel or hardware "
  4553. "representative who provided you with this "
  4554. "hardware.\n");
  4555. break;
  4556. default:
  4557. e_dev_err("Hardware Error: %d\n", err);
  4558. }
  4559. clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
  4560. /* flush entries out of MAC table */
  4561. ixgbe_flush_sw_mac_table(adapter);
  4562. __dev_uc_unsync(netdev, NULL);
  4563. /* do not flush user set addresses */
  4564. ixgbe_mac_set_default_filter(adapter);
  4565. /* update SAN MAC vmdq pool selection */
  4566. if (hw->mac.san_mac_rar_index)
  4567. hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
  4568. if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
  4569. ixgbe_ptp_reset(adapter);
  4570. if (hw->phy.ops.set_phy_power) {
  4571. if (!netif_running(adapter->netdev) && !adapter->wol)
  4572. hw->phy.ops.set_phy_power(hw, false);
  4573. else
  4574. hw->phy.ops.set_phy_power(hw, true);
  4575. }
  4576. }
  4577. /**
  4578. * ixgbe_clean_tx_ring - Free Tx Buffers
  4579. * @tx_ring: ring to be cleaned
  4580. **/
  4581. static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
  4582. {
  4583. struct ixgbe_tx_buffer *tx_buffer_info;
  4584. unsigned long size;
  4585. u16 i;
  4586. /* ring already cleared, nothing to do */
  4587. if (!tx_ring->tx_buffer_info)
  4588. return;
  4589. /* Free all the Tx ring sk_buffs */
  4590. for (i = 0; i < tx_ring->count; i++) {
  4591. tx_buffer_info = &tx_ring->tx_buffer_info[i];
  4592. ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
  4593. }
  4594. netdev_tx_reset_queue(txring_txq(tx_ring));
  4595. size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
  4596. memset(tx_ring->tx_buffer_info, 0, size);
  4597. /* Zero out the descriptor ring */
  4598. memset(tx_ring->desc, 0, tx_ring->size);
  4599. tx_ring->next_to_use = 0;
  4600. tx_ring->next_to_clean = 0;
  4601. }
  4602. /**
  4603. * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
  4604. * @adapter: board private structure
  4605. **/
  4606. static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
  4607. {
  4608. int i;
  4609. for (i = 0; i < adapter->num_rx_queues; i++)
  4610. ixgbe_clean_rx_ring(adapter->rx_ring[i]);
  4611. }
  4612. /**
  4613. * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
  4614. * @adapter: board private structure
  4615. **/
  4616. static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
  4617. {
  4618. int i;
  4619. for (i = 0; i < adapter->num_tx_queues; i++)
  4620. ixgbe_clean_tx_ring(adapter->tx_ring[i]);
  4621. }
  4622. static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
  4623. {
  4624. struct hlist_node *node2;
  4625. struct ixgbe_fdir_filter *filter;
  4626. spin_lock(&adapter->fdir_perfect_lock);
  4627. hlist_for_each_entry_safe(filter, node2,
  4628. &adapter->fdir_filter_list, fdir_node) {
  4629. hlist_del(&filter->fdir_node);
  4630. kfree(filter);
  4631. }
  4632. adapter->fdir_filter_count = 0;
  4633. spin_unlock(&adapter->fdir_perfect_lock);
  4634. }
  4635. void ixgbe_down(struct ixgbe_adapter *adapter)
  4636. {
  4637. struct net_device *netdev = adapter->netdev;
  4638. struct ixgbe_hw *hw = &adapter->hw;
  4639. struct net_device *upper;
  4640. struct list_head *iter;
  4641. int i;
  4642. /* signal that we are down to the interrupt handler */
  4643. if (test_and_set_bit(__IXGBE_DOWN, &adapter->state))
  4644. return; /* do nothing if already down */
  4645. /* disable receives */
  4646. hw->mac.ops.disable_rx(hw);
  4647. /* disable all enabled rx queues */
  4648. for (i = 0; i < adapter->num_rx_queues; i++)
  4649. /* this call also flushes the previous write */
  4650. ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
  4651. usleep_range(10000, 20000);
  4652. netif_tx_stop_all_queues(netdev);
  4653. /* call carrier off first to avoid false dev_watchdog timeouts */
  4654. netif_carrier_off(netdev);
  4655. netif_tx_disable(netdev);
  4656. /* disable any upper devices */
  4657. netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
  4658. if (netif_is_macvlan(upper)) {
  4659. struct macvlan_dev *vlan = netdev_priv(upper);
  4660. if (vlan->fwd_priv) {
  4661. netif_tx_stop_all_queues(upper);
  4662. netif_carrier_off(upper);
  4663. netif_tx_disable(upper);
  4664. }
  4665. }
  4666. }
  4667. ixgbe_irq_disable(adapter);
  4668. ixgbe_napi_disable_all(adapter);
  4669. clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
  4670. adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
  4671. adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
  4672. del_timer_sync(&adapter->service_timer);
  4673. if (adapter->num_vfs) {
  4674. /* Clear EITR Select mapping */
  4675. IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
  4676. /* Mark all the VFs as inactive */
  4677. for (i = 0 ; i < adapter->num_vfs; i++)
  4678. adapter->vfinfo[i].clear_to_send = false;
  4679. /* ping all the active vfs to let them know we are going down */
  4680. ixgbe_ping_all_vfs(adapter);
  4681. /* Disable all VFTE/VFRE TX/RX */
  4682. ixgbe_disable_tx_rx(adapter);
  4683. }
  4684. /* disable transmits in the hardware now that interrupts are off */
  4685. for (i = 0; i < adapter->num_tx_queues; i++) {
  4686. u8 reg_idx = adapter->tx_ring[i]->reg_idx;
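/* TXDCTL.SWFLSH both disables the queue and flushes any
 * descriptors still pending in it
 */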
  4687. IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
  4688. }
/* Disable the Tx DMA engine on 82599 and later MACs */
  4690. switch (hw->mac.type) {
  4691. case ixgbe_mac_82599EB:
  4692. case ixgbe_mac_X540:
  4693. case ixgbe_mac_X550:
  4694. case ixgbe_mac_X550EM_x:
  4695. case ixgbe_mac_x550em_a:
  4696. IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
  4697. (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
  4698. ~IXGBE_DMATXCTL_TE));
  4699. break;
  4700. default:
  4701. break;
  4702. }
  4703. if (!pci_channel_offline(adapter->pdev))
  4704. ixgbe_reset(adapter);
  4705. /* power down the optics for 82599 SFP+ fiber */
  4706. if (hw->mac.ops.disable_tx_laser)
  4707. hw->mac.ops.disable_tx_laser(hw);
  4708. ixgbe_clean_all_tx_rings(adapter);
  4709. ixgbe_clean_all_rx_rings(adapter);
  4710. }
  4711. /**
  4712. * ixgbe_tx_timeout - Respond to a Tx Hang
  4713. * @netdev: network interface device structure
  4714. **/
  4715. static void ixgbe_tx_timeout(struct net_device *netdev)
  4716. {
  4717. struct ixgbe_adapter *adapter = netdev_priv(netdev);
  4718. /* Do the reset outside of interrupt context */
  4719. ixgbe_tx_timeout_reset(adapter);
  4720. }
  4721. #ifdef CONFIG_IXGBE_DCB
  4722. static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
  4723. {
  4724. struct ixgbe_hw *hw = &adapter->hw;
  4725. struct tc_configuration *tc;
  4726. int j;
  4727. switch (hw->mac.type) {
  4728. case ixgbe_mac_82598EB:
  4729. case ixgbe_mac_82599EB:
  4730. adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
  4731. adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
  4732. break;
  4733. case ixgbe_mac_X540:
  4734. case ixgbe_mac_X550:
  4735. adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
  4736. adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
  4737. break;
  4738. case ixgbe_mac_X550EM_x:
  4739. case ixgbe_mac_x550em_a:
  4740. default:
  4741. adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
  4742. adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
  4743. break;
  4744. }
  4745. /* Configure DCB traffic classes */
  4746. for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
  4747. tc = &adapter->dcb_cfg.tc_config[j];
  4748. tc->path[DCB_TX_CONFIG].bwg_id = 0;
  4749. tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
  4750. tc->path[DCB_RX_CONFIG].bwg_id = 0;
  4751. tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
  4752. tc->dcb_pfc = pfc_disabled;
  4753. }
  4754. /* Initialize default user to priority mapping, UPx->TC0 */
  4755. tc = &adapter->dcb_cfg.tc_config[0];
  4756. tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
  4757. tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
  4758. adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
  4759. adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
  4760. adapter->dcb_cfg.pfc_mode_enable = false;
  4761. adapter->dcb_set_bitmap = 0x00;
  4762. if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
  4763. adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
  4764. memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
  4765. sizeof(adapter->temp_dcb_cfg));
  4766. }
  4767. #endif
  4768. /**
  4769. * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
  4770. * @adapter: board private structure to initialize
  4771. *
  4772. * ixgbe_sw_init initializes the Adapter private data structure.
  4773. * Fields are initialized based on PCI device information and
  4774. * OS network device settings (MTU size).
  4775. **/
  4776. static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
  4777. {
  4778. struct ixgbe_hw *hw = &adapter->hw;
  4779. struct pci_dev *pdev = adapter->pdev;
  4780. unsigned int rss, fdir;
  4781. u32 fwsm;
  4782. int i;
  4783. /* PCI config space info */
  4784. hw->vendor_id = pdev->vendor;
  4785. hw->device_id = pdev->device;
  4786. hw->revision_id = pdev->revision;
  4787. hw->subsystem_vendor_id = pdev->subsystem_vendor;
  4788. hw->subsystem_device_id = pdev->subsystem_device;
  4789. /* Set common capability flags and settings */
  4790. rss = min_t(int, ixgbe_max_rss_indices(adapter), num_online_cpus());
  4791. adapter->ring_feature[RING_F_RSS].limit = rss;
  4792. adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
  4793. adapter->max_q_vectors = MAX_Q_VECTORS_82599;
  4794. adapter->atr_sample_rate = 20;
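/* sample roughly one Tx packet in 20 when seeding ATR flow
 * director filters
 */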
  4795. fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
  4796. adapter->ring_feature[RING_F_FDIR].limit = fdir;
  4797. adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
  4798. #ifdef CONFIG_IXGBE_DCA
  4799. adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
  4800. #endif
  4801. #ifdef CONFIG_IXGBE_DCB
  4802. adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
  4803. adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
  4804. #endif
  4805. #ifdef IXGBE_FCOE
  4806. adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
  4807. adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
  4808. #ifdef CONFIG_IXGBE_DCB
  4809. /* Default traffic class to use for FCoE */
  4810. adapter->fcoe.up = IXGBE_FCOE_DEFTC;
  4811. #endif /* CONFIG_IXGBE_DCB */
  4812. #endif /* IXGBE_FCOE */
  4813. /* initialize static ixgbe jump table entries */
  4814. adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
  4815. GFP_KERNEL);
  4816. if (!adapter->jump_tables[0])
  4817. return -ENOMEM;
  4818. adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
  4819. for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
  4820. adapter->jump_tables[i] = NULL;
  4821. adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
  4822. hw->mac.num_rar_entries,
  4823. GFP_ATOMIC);
  4824. if (!adapter->mac_table)
  4825. return -ENOMEM;
  4826. /* Set MAC specific capability flags and exceptions */
  4827. switch (hw->mac.type) {
  4828. case ixgbe_mac_82598EB:
  4829. adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE;
  4830. if (hw->device_id == IXGBE_DEV_ID_82598AT)
  4831. adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
  4832. adapter->max_q_vectors = MAX_Q_VECTORS_82598;
  4833. adapter->ring_feature[RING_F_FDIR].limit = 0;
  4834. adapter->atr_sample_rate = 0;
  4835. adapter->fdir_pballoc = 0;
  4836. #ifdef IXGBE_FCOE
  4837. adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
  4838. adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
  4839. #ifdef CONFIG_IXGBE_DCB
  4840. adapter->fcoe.up = 0;
  4841. #endif /* IXGBE_DCB */
  4842. #endif /* IXGBE_FCOE */
  4843. break;
  4844. case ixgbe_mac_82599EB:
  4845. if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
  4846. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
  4847. break;
  4848. case ixgbe_mac_X540:
  4849. fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
  4850. if (fwsm & IXGBE_FWSM_TS_ENABLED)
  4851. adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
  4852. break;
  4853. case ixgbe_mac_x550em_a:
  4854. adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
  4855. /* fall through */
  4856. case ixgbe_mac_X550EM_x:
  4857. #ifdef CONFIG_IXGBE_DCB
  4858. adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
  4859. #endif
  4860. #ifdef IXGBE_FCOE
  4861. adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
  4862. #ifdef CONFIG_IXGBE_DCB
  4863. adapter->fcoe.up = 0;
  4864. #endif /* IXGBE_DCB */
  4865. #endif /* IXGBE_FCOE */
  4866. /* Fall Through */
  4867. case ixgbe_mac_X550:
  4868. #ifdef CONFIG_IXGBE_DCA
  4869. adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
  4870. #endif
  4871. adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
  4872. break;
  4873. default:
  4874. break;
  4875. }
  4876. #ifdef IXGBE_FCOE
  4877. /* FCoE support exists, always init the FCoE lock */
  4878. spin_lock_init(&adapter->fcoe.lock);
  4879. #endif
  4880. /* n-tuple support exists, always init our spinlock */
  4881. spin_lock_init(&adapter->fdir_perfect_lock);
  4882. #ifdef CONFIG_IXGBE_DCB
  4883. ixgbe_init_dcb(adapter);
  4884. #endif
  4885. /* default flow control settings */
  4886. hw->fc.requested_mode = ixgbe_fc_full;
  4887. hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
  4888. ixgbe_pbthresh_setup(adapter);
  4889. hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
  4890. hw->fc.send_xon = true;
  4891. hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
  4892. #ifdef CONFIG_PCI_IOV
  4893. if (max_vfs > 0)
  4894. e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n");
  4895. /* assign number of SR-IOV VFs */
  4896. if (hw->mac.type != ixgbe_mac_82598EB) {
  4897. if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
  4898. adapter->num_vfs = 0;
  4899. e_dev_warn("max_vfs parameter out of range. Not assigning any SR-IOV VFs\n");
  4900. } else {
  4901. adapter->num_vfs = max_vfs;
  4902. }
  4903. }
  4904. #endif /* CONFIG_PCI_IOV */
  4905. /* enable itr by default in dynamic mode */
  4906. adapter->rx_itr_setting = 1;
  4907. adapter->tx_itr_setting = 1;
  4908. /* set default ring sizes */
  4909. adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
  4910. adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
  4911. /* set default work limits */
  4912. adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
  4913. /* initialize eeprom parameters */
  4914. if (ixgbe_init_eeprom_params_generic(hw)) {
  4915. e_dev_err("EEPROM initialization failed\n");
  4916. return -EIO;
  4917. }
  4918. /* PF holds first pool slot */
  4919. set_bit(0, &adapter->fwd_bitmask);
  4920. set_bit(__IXGBE_DOWN, &adapter->state);
  4921. return 0;
  4922. }
  4923. /**
  4924. * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
  4925. * @tx_ring: tx descriptor ring (for a specific queue) to setup
  4926. *
  4927. * Return 0 on success, negative on failure
  4928. **/
  4929. int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
  4930. {
  4931. struct device *dev = tx_ring->dev;
  4932. int orig_node = dev_to_node(dev);
  4933. int ring_node = -1;
  4934. int size;
  4935. size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
  4936. if (tx_ring->q_vector)
  4937. ring_node = tx_ring->q_vector->numa_node;
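	/* Prefer allocations on the queue vector's NUMA node; each attempt
	 * below falls back to a default-node allocation if the node-local
	 * one fails.
	 */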
	tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, ring_node);
	tx_ring->desc = dma_alloc_coherent(dev,
					   tx_ring->size,
					   &tx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!tx_ring->desc)
		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
						   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;

		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ixgbe_free_tx_resources(adapter->tx_ring[i]);
	return err;
}
/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int ring_node = -1;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;

	if (rx_ring->q_vector)
		ring_node = rx_ring->q_vector->numa_node;

	rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, ring_node);
	rx_ring->desc = dma_alloc_coherent(dev,
					   rx_ring->size,
					   &rx_ring->dma,
					   GFP_KERNEL);
	set_dev_node(dev, orig_node);
	if (!rx_ring->desc)
		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
						   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;

		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

#ifdef IXGBE_FCOE
	err = ixgbe_setup_fcoe_ddp_resources(adapter);
	if (!err)
#endif
		return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		ixgbe_free_rx_resources(adapter->rx_ring[i]);
	return err;
}
/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
{
	ixgbe_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbe_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
{
	ixgbe_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

#ifdef IXGBE_FCOE
	ixgbe_free_fcoe_ddp_resources(adapter);
#endif
	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbe_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	/*
	 * For 82599EB we cannot allow legacy VFs to enable their receive
	 * paths when an MTU greater than 1500 is configured. So display a
	 * warning that legacy VFs will be disabled.
	 */
	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
	    (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");

	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);

	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err, queues;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	if (adapter->num_rx_pools > 1)
		queues = adapter->num_rx_queues_per_pool;
	else
		queues = adapter->num_tx_queues;

	err = netif_set_real_num_tx_queues(netdev, queues);
	if (err)
		goto err_set_queues;

	if (adapter->num_rx_pools > 1 &&
	    adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES)
		queues = IXGBE_MAX_L2A_QUEUES;
	else
		queues = adapter->num_rx_queues;
	err = netif_set_real_num_rx_queues(netdev, queues);
	if (err)
		goto err_set_queues;

	ixgbe_ptp_init(adapter);

	ixgbe_up_complete(adapter);

	ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
	udp_tunnel_get_rx_info(netdev);

	return 0;

err_set_queues:
	ixgbe_free_irq(adapter);
err_req_irq:
	ixgbe_free_all_rx_resources(adapter);
	if (hw->phy.ops.set_phy_power && !adapter->wol)
		hw->phy.ops.set_phy_power(&adapter->hw, false);
err_setup_rx:
	ixgbe_free_all_tx_resources(adapter);
err_setup_tx:
	ixgbe_reset(adapter);

	return err;
}
static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
{
	ixgbe_ptp_suspend(adapter);

	if (adapter->hw.phy.ops.enter_lplu) {
		adapter->hw.phy.reset_disable = true;
		ixgbe_down(adapter);
		adapter->hw.phy.ops.enter_lplu(&adapter->hw);
		adapter->hw.phy.reset_disable = false;
	} else {
		ixgbe_down(adapter);
	}

	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_ptp_stop(adapter);

	if (netif_device_present(netdev))
		ixgbe_close_suspend(adapter);

	ixgbe_fdir_filter_exit(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	u32 err;

	adapter->hw.hw_addr = adapter->io_addr;
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		e_dev_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBE_DISABLED, &adapter->state);
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	rtnl_lock();
	err = ixgbe_init_interrupt_scheme(adapter);
	if (!err && netif_running(netdev))
		err = ixgbe_open(netdev);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}
#endif /* CONFIG_PM */
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_close_suspend(adapter);

	ixgbe_clear_interrupt_scheme(adapter);
	rtnl_unlock();

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif
	if (hw->mac.ops.stop_link_on_d3)
		hw->mac.ops.stop_link_on_d3(hw);

	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* enable the optics for 82599 SFP+ fiber as we can WoL */
		if (hw->mac.ops.enable_tx_laser)
			hw->mac.ops.enable_tx_laser(hw);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		pci_wake_from_d3(pdev, false);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		pci_wake_from_d3(pdev, !!wufc);
		break;
	default:
		break;
	}

	*enable_wake = !!wufc;
	if (hw->phy.ops.set_phy_power && !*enable_wake)
		hw->phy.ops.set_phy_power(hw, false);

	ixgbe_release_hw_control(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
	u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;

	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		u64 rsc_count = 0;
		u64 rsc_flush = 0;
		for (i = 0; i < adapter->num_rx_queues; i++) {
			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
		}
		adapter->rsc_total_count = rsc_count;
		adapter->rsc_total_flush = rsc_flush;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
		bytes += rx_ring->stats.bytes;
		packets += rx_ring->stats.packets;
	}
	adapter->non_eop_descs = non_eop_descs;
	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
	adapter->hw_csum_rx_error = hw_csum_rx_error;
	netdev->stats.rx_bytes = bytes;
	netdev->stats.rx_packets = packets;

	bytes = 0;
	packets = 0;
	/* gather some stats to the adapter struct that are per queue */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
		bytes += tx_ring->stats.bytes;
		packets += tx_ring->stats.packets;
	}
	adapter->restart_queue = restart_queue;
	adapter->tx_busy = tx_busy;
	netdev->stats.tx_bytes = bytes;
	netdev->stats.tx_packets = packets;

	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);

	/* 8 register reads */
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		hwstats->mpc[i] += mpc;
		total_mpc += hwstats->mpc[i];
		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			hwstats->rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			hwstats->pxonrxc[i] +=
				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			break;
		default:
			break;
		}
	}

	/* 16 register reads */
	for (i = 0; i < 16; i++) {
		hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		if ((hw->mac.type == ixgbe_mac_82599EB) ||
		    (hw->mac.type == ixgbe_mac_X540) ||
		    (hw->mac.type == ixgbe_mac_X550) ||
		    (hw->mac.type == ixgbe_mac_X550EM_x) ||
		    (hw->mac.type == ixgbe_mac_x550em_a)) {
			hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
			hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); /* to clear */
		}
	}

	hwstats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	hwstats->gprc -= missed_rx;

	ixgbe_update_xoff_received(adapter);

	/* 82598 hardware only has a 32 bit counter in the high register */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		/* OS2BMC stats are X540 and later */
		hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
		hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
		hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
		hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
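		/* X540 and later also accumulate the 82599 counters below,
		 * so the missing break here is intentional.
		 */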
	case ixgbe_mac_82599EB:
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
		/* Add up per cpu counters for total ddp alloc failures */
		if (adapter->fcoe.ddp_pool) {
			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
			struct ixgbe_fcoe_ddp_pool *ddp_pool;
			unsigned int cpu;
			u64 noddp = 0, noddp_ext_buff = 0;
			for_each_possible_cpu(cpu) {
				ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
				noddp += ddp_pool->noddp;
				noddp_ext_buff += ddp_pool->noddp_ext_buff;
			}
			hwstats->fcoe_noddp = noddp;
			hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
		}
#endif /* IXGBE_FCOE */
		break;
	default:
		break;
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	hwstats->bprc += bprc;
	hwstats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		hwstats->mprc -= bprc;
	hwstats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	hwstats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	hwstats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	hwstats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	hwstats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	hwstats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	hwstats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	hwstats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	hwstats->lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	hwstats->lxofftxc += lxoff;
	hwstats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	hwstats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	hwstats->gptc -= xon_off_tot;
	hwstats->mptc -= xon_off_tot;
	hwstats->gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
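	/* Each PAUSE frame is a minimum-size frame of ETH_ZLEN + ETH_FCS_LEN
	 * (60 + 4 = 64) bytes, so the good-octets counter is reduced by 64
	 * bytes for every XON/XOFF frame subtracted above.
	 */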
	hwstats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	hwstats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	hwstats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	hwstats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	hwstats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	hwstats->ptc64 -= xon_off_tot;
	hwstats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	hwstats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	hwstats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = hwstats->mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = hwstats->rlec;
	netdev->stats.rx_crc_errors = hwstats->crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}
/**
 * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* do nothing if we are not using signature filters */
	if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
		return;

	adapter->fdir_overflow++;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
				&(adapter->tx_ring[i]->state));
		/* re-enable flow director interrupts */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
	} else {
		e_err(probe, "failed to finish FDIR re-initialization, ignored adding FDIR ATR filters\n");
	}
}
/**
 * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
 * @adapter: pointer to the device adapter structure
 *
 * This function serves two purposes. First it strobes the interrupt lines
 * in order to make certain interrupts are occurring. Secondly it sets the
 * bits needed to check for TX hangs. As a result we should immediately
 * determine if a hang has occurred.
 */
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/* If we're down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	/* Force detection of hung controller */
	if (netif_carrier_ok(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_check_for_tx_hang(adapter->tx_ring[i]);
	}

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
				(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
	} else {
		/* get one bit for every active tx/rx interrupt vector */
		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct ixgbe_q_vector *qv = adapter->q_vector[i];
			if (qv->rx.ring || qv->tx.ring)
				eics |= BIT_ULL(i);
		}
	}

	/* Cause software interrupt to ensure rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);
}
/**
 * ixgbe_watchdog_update_link - update the link status
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
		return;

	if (hw->mac.ops.check_link) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	} else {
		/* always assume link is up, if no check link function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) {
		hw->mac.ops.fc_enable(hw);
		ixgbe_set_rx_drop_en(adapter);
	}

	if (link_up ||
	    time_after(jiffies, (adapter->link_check_timeout +
				 IXGBE_TRY_LINK_TIMEOUT))) {
		adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		IXGBE_WRITE_FLUSH(hw);
	}

	adapter->link_up = link_up;
	adapter->link_speed = link_speed;
}
static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	struct net_device *netdev = adapter->netdev;
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = 0,
	};
	u8 up = 0;

	if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
		up = dcb_ieee_getapp_mask(netdev, &app);

	adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
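	/* up is a bitmask of user priorities mapped to the default app
	 * entry; ffs(up) - 1 selects the lowest set priority, while a
	 * mask of 0 or just bit 0 leaves the default priority at 0.
	 */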
#endif
}

/**
 * ixgbe_watchdog_link_is_up - update netif_carrier status and
 *                             print link up message
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *upper;
	struct list_head *iter;
	u32 link_speed = adapter->link_speed;
	const char *speed_str;
	bool flow_rx, flow_tx;

	/* only continue if link was previously down */
	if (netif_carrier_ok(netdev))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB: {
		u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
		u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
		flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
		flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
	}
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
	case ixgbe_mac_82599EB: {
		u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
		flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
		flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
	}
		break;
	default:
		flow_tx = false;
		flow_rx = false;
		break;
	}

	adapter->last_rx_ptp_check = jiffies;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	switch (link_speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		speed_str = "10 Gbps";
		break;
	case IXGBE_LINK_SPEED_2_5GB_FULL:
		speed_str = "2.5 Gbps";
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		speed_str = "1 Gbps";
		break;
	case IXGBE_LINK_SPEED_100_FULL:
		speed_str = "100 Mbps";
		break;
	default:
		speed_str = "unknown speed";
		break;
	}
	e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str,
	       ((flow_rx && flow_tx) ? "RX/TX" :
	       (flow_rx ? "RX" :
	       (flow_tx ? "TX" : "None"))));

	netif_carrier_on(netdev);
	ixgbe_check_vf_rate_limit(adapter);

	/* enable transmits */
	netif_tx_wake_all_queues(adapter->netdev);

	/* enable any upper devices */
	rtnl_lock();
	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
		if (netif_is_macvlan(upper)) {
			struct macvlan_dev *vlan = netdev_priv(upper);

			if (vlan->fwd_priv)
				netif_tx_wake_all_queues(upper);
		}
	}
	rtnl_unlock();

	/* update the default user priority for VFs */
	ixgbe_update_default_up(adapter);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}
/**
 * ixgbe_watchdog_link_is_down - update netif_carrier status and
 *                               print link down message
 * @adapter: pointer to the adapter structure
 **/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->link_up = false;
	adapter->link_speed = 0;

	/* only continue if link was up previously */
	if (!netif_carrier_ok(netdev))
		return;

	/* poll for SFP+ cable when link is down */
	if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
		adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
		ixgbe_ptp_start_cyclecounter(adapter);

	e_info(drv, "NIC Link is Down\n");
	netif_carrier_off(netdev);

	/* ping all the active vfs to let them know link has changed */
	ixgbe_ping_all_vfs(adapter);
}

static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];

		if (tx_ring->next_to_use != tx_ring->next_to_clean)
			return true;
	}

	return false;
}

static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
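	/* __ALIGN_MASK(1, ~vmdq->mask) rounds 1 up to the pool stride
	 * encoded by the VMDq feature mask, i.e. the number of Tx queues
	 * backing each pool.
	 */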
	int i, j;

	if (!adapter->num_vfs)
		return false;

	/* resetting the PF is only needed for MAC before X550 */
	if (hw->mac.type >= ixgbe_mac_X550)
		return false;

	for (i = 0; i < adapter->num_vfs; i++) {
		for (j = 0; j < q_per_pool; j++) {
			u32 h, t;

			h = IXGBE_READ_REG(hw, IXGBE_PVFTDHN(q_per_pool, i, j));
			t = IXGBE_READ_REG(hw, IXGBE_PVFTDTN(q_per_pool, i, j));

			if (h != t)
				return true;
		}
	}

	return false;
}

/**
 * ixgbe_watchdog_flush_tx - flush queues on link down
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
	if (!netif_carrier_ok(adapter->netdev)) {
		if (ixgbe_ring_tx_pending(adapter) ||
		    ixgbe_vf_tx_pending(adapter)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			e_warn(drv, "initiating reset to clear Tx work after link loss\n");
			set_bit(__IXGBE_RESET_REQUESTED, &adapter->state);
		}
	}
}

#ifdef CONFIG_PCI_IOV
static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter,
				      struct pci_dev *vfdev)
{
	if (!pci_wait_for_pending_transaction(vfdev))
		e_dev_warn("Issuing VFLR with pending transactions\n");

	e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev));
	pcie_capability_set_word(vfdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);

	msleep(100);
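	/* The PCIe spec requires software to wait 100 ms after issuing a
	 * function-level reset before accessing the function again.
	 */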
}

static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int vf;
	u32 gpc;

	if (!(netif_carrier_ok(adapter->netdev)))
		return;

	gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC);
	if (gpc) /* If incrementing then no need for the check below */
		return;
	/* Check to see if a bad DMA write target from an errant or
	 * malicious VF has caused a PCIe error.  If so then we can
	 * issue a VFLR to the offending VF(s) and then resume without
	 * requesting a full slot reset.
	 */

	if (!pdev)
		return;

	/* check status reg for all VFs owned by this PF */
	for (vf = 0; vf < adapter->num_vfs; ++vf) {
		struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
		u16 status_reg;

		if (!vfdev)
			continue;
		pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
		if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
		    status_reg & PCI_STATUS_REC_MASTER_ABORT)
			ixgbe_issue_vf_flr(adapter, vfdev);
	}
}

static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
{
	u32 ssvpc;

	/* Do not perform spoof check for 82598 or if not in IOV mode */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		return;

	ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);

	/*
	 * ssvpc register is cleared on read, if zero then no
	 * spoofed packets in the last interval.
	 */
	if (!ssvpc)
		return;

	e_warn(drv, "%u Spoofed packets detected\n", ssvpc);
}
#else
static void ixgbe_spoof_check(struct ixgbe_adapter __always_unused *adapter)
{
}

static void
ixgbe_check_for_bad_vf(struct ixgbe_adapter __always_unused *adapter)
{
}
#endif /* CONFIG_PCI_IOV */

/**
 * ixgbe_watchdog_subtask - check and bring link up
 * @adapter: pointer to the device adapter structure
 **/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
	/* if interface is down, removing or resetting, do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_watchdog_update_link(adapter);

	if (adapter->link_up)
		ixgbe_watchdog_link_is_up(adapter);
	else
		ixgbe_watchdog_link_is_down(adapter);

	ixgbe_check_for_bad_vf(adapter);
	ixgbe_spoof_check(adapter);
	ixgbe_update_stats(adapter);

	ixgbe_watchdog_flush_tx(adapter);
}

/**
 * ixgbe_sfp_detection_subtask - poll for SFP+ cable
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	s32 err;

	/* not searching for SFP so there is nothing to do here */
	if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
	    !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		return;

	if (adapter->sfp_poll_time &&
	    time_after(adapter->sfp_poll_time, jiffies))
		return; /* If not yet time to poll for SFP */

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1;

	err = hw->phy.ops.identify_sfp(hw);
	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		/* If no cable is present, then we need to reset
		 * the next time we find a good cable. */
		adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
	}

	/* exit on error */
	if (err)
		goto sfp_out;

	/* exit if reset not needed */
	if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
		goto sfp_out;

	adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;

	/*
	 * A module may be identified correctly, but the EEPROM may not have
	 * support for that module.  setup_sfp() will fail in that case, so
	 * we should not allow that module to load.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		err = hw->phy.ops.reset(hw);
	else
		err = hw->mac.ops.setup_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto sfp_out;

	adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
	e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);

sfp_out:
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);

	if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
	    (adapter->netdev->reg_state == NETREG_REGISTERED)) {
		e_dev_err("failed to initialize because an unsupported SFP+ module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		unregister_netdev(adapter->netdev);
	}
}
/**
 * ixgbe_sfp_link_config_subtask - set up link SFP after module install
 * @adapter: the ixgbe adapter structure
 **/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 speed;
	bool autoneg = false;

	if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
		return;

	/* someone else is in init, wait until next service event */
	if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		return;

	adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;

	speed = hw->phy.autoneg_advertised;
	if ((!speed) && (hw->mac.ops.get_link_capabilities)) {
		hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg);

		/* setup the highest link when no autoneg */
		if (!autoneg) {
			if (speed & IXGBE_LINK_SPEED_10GB_FULL)
				speed = IXGBE_LINK_SPEED_10GB_FULL;
		}
	}

	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, speed, true);

	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
}

/**
 * ixgbe_service_timer - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_service_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	unsigned long next_event_offset;

	/* poll faster when waiting for link */
	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
		next_event_offset = HZ / 10;
	else
		next_event_offset = HZ * 2;

	/* Reset the timer */
	mod_timer(&adapter->service_timer, next_event_offset + jiffies);

	ixgbe_service_event_schedule(adapter);
}

static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 status;

	if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT))
		return;

	adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT;

	if (!hw->phy.ops.handle_lasi)
		return;

	status = hw->phy.ops.handle_lasi(&adapter->hw);
	if (status != IXGBE_ERR_OVERTEMP)
		return;

	e_crit(drv, "%s\n", ixgbe_overheat_msg);
}
static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
{
	if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state))
		return;

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_REMOVING, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return;

	ixgbe_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	adapter->tx_timeout_count++;

	rtnl_lock();
	ixgbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
 * ixgbe_service_task - manages and runs subtasks
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_service_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
						     struct ixgbe_adapter,
						     service_task);
	if (ixgbe_removed(adapter->hw.hw_addr)) {
		if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbe_down(adapter);
			rtnl_unlock();
		}
		ixgbe_service_event_complete(adapter);
		return;
	}
	if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
		rtnl_lock();
		adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
		udp_tunnel_get_rx_info(adapter->netdev);
		rtnl_unlock();
	}
	ixgbe_reset_subtask(adapter);
	ixgbe_phy_interrupt_subtask(adapter);
	ixgbe_sfp_detection_subtask(adapter);
	ixgbe_sfp_link_config_subtask(adapter);
	ixgbe_check_overtemp_subtask(adapter);
	ixgbe_watchdog_subtask(adapter);
	ixgbe_fdir_reinit_subtask(adapter);
	ixgbe_check_hang_subtask(adapter);

	if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
		ixgbe_ptp_overflow_check(adapter);
		ixgbe_ptp_rx_hang(adapter);
	}

	ixgbe_service_event_complete(adapter);
}
static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
		     u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
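	/* The stack seeds tcp->check with a pseudo-header checksum that
	 * includes the full payload length; the hardware expects it
	 * without the payload, so back the length out of the checksum.
	 */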
	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 0 as index for TSO */
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
			  mss_l4len_idx);

	return 1;
}

static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}
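/* The helper above walks the IPv6 extension-header chain looking for an
 * SCTP header; the packet is treated as SCTP only when that header begins
 * exactly where the stack asked checksumming to start.
 */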
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
			  struct ixgbe_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
					 IXGBE_TX_FLAGS_CC)))
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     ixgbe_ipv6_csum_is_sctp(skb))) {
			type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
}
#define IXGBE_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))
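/* IXGBE_SET_FLAG() translates a single-bit flag in _input into the bit
 * position required by _result: because _flag and _result are both
 * power-of-two constants, the multiply or divide by their ratio resolves
 * to a compile-time shift, yielding _result when the flag is set and 0
 * when it is not.
 */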
static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA |
		       IXGBE_ADVTXD_DCMD_DEXT |
		       IXGBE_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_HW_VLAN,
				   IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSO,
				   IXGBE_ADVTXD_DCMD_TSE);

	/* set timestamp bit if present */
	cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP,
				   IXGBE_ADVTXD_MAC_TSTAMP);

	/* insert frame checksum */
	cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS);
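	/* IFCS was set unconditionally above, so the XOR clears it again
	 * when skb->no_fcs requests that no frame checksum be inserted.
	 */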
  6279. return cmd_type;
  6280. }
static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				   u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;

	/* enable L4 checksum for TSO and TX checksum offload */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CSUM,
					IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_IPV4,
					IXGBE_ADVTXD_POPTS_IXSM);

	/*
	 * Check Context must be set if Tx switch is enabled, which it
	 * always is for the case where virtual functions are running
	 */
	olinfo_status |= IXGBE_SET_FLAG(tx_flags,
					IXGBE_TX_FLAGS_CC,
					IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
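/* Slow path of ixgbe_maybe_stop_tx(): stop the subqueue, then re-check
 * the free descriptor count under the memory barrier in case the clean-up
 * path freed descriptors after our first check, and restart if so.
 */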
static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbe_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}
static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
	if (likely(ixgbe_desc_unused(tx_ring) >= size))
		return 0;

	return __ixgbe_maybe_stop_tx(tx_ring, size);
}
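/* EOP marks the final descriptor of a packet; RS asks the hardware to
 * report status (write back DD) for that descriptor.
 */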
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
		       IXGBE_TXD_CMD_RS)
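/* Map the skb head and frags for DMA and fill one data descriptor per
 * mapping, splitting any buffer larger than IXGBE_MAX_DATA_PER_TXD across
 * several descriptors. The last descriptor gets EOP/RS, next_to_watch is
 * published for the clean-up path, and the tail register is only bumped
 * when no further skbs are expected (xmit_more).
 */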
static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
			 struct ixgbe_tx_buffer *first,
			 const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBE_TX_DESC(tx_ring, i);

	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

#ifdef IXGBE_FCOE
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		if (data_len < sizeof(struct fcoe_crc_eof)) {
			size -= sizeof(struct fcoe_crc_eof) - data_len;
			data_len = 0;
		} else {
			data_len -= sizeof(struct fcoe_crc_eof);
		}
	}

#endif
	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

#ifdef IXGBE_FCOE
		size = min_t(unsigned int, data_len, skb_frag_size(frag));
#else
		size = skb_frag_size(frag);
#endif
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IXGBE_TXD_CMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/*
	 * Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}
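/* Application Targeted Routing: sample outgoing TCP flows (every SYN, or
 * once per atr_sample_rate packets) and program a signature filter so
 * that receive traffic for the flow lands on the queue paired with this
 * Tx queue, keeping the whole flow on one CPU.
 */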
static void ixgbe_atr(struct ixgbe_ring *ring,
		      struct ixgbe_tx_buffer *first)
{
	struct ixgbe_q_vector *q_vector = ring->q_vector;
	union ixgbe_atr_hash_dword input = { .dword = 0 };
	union ixgbe_atr_hash_dword common = { .dword = 0 };
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	struct sk_buff *skb;
	__be16 vlan_id;
	int l4_proto;

	/* if ring doesn't have an interrupt vector, cannot perform ATR */
	if (!q_vector)
		return;

	/* do nothing if sampling is disabled */
	if (!ring->atr_sample_rate)
		return;

	ring->atr_count++;

	/* currently only IPv4/IPv6 with TCP is supported */
	if ((first->protocol != htons(ETH_P_IP)) &&
	    (first->protocol != htons(ETH_P_IPV6)))
		return;

	/* snag network header to get L4 type and address */
	skb = first->skb;
	hdr.network = skb_network_header(skb);
	if (skb->encapsulation &&
	    first->protocol == htons(ETH_P_IP) &&
	    hdr.ipv4->protocol == IPPROTO_UDP) {
		struct ixgbe_adapter *adapter = q_vector->adapter;

		/* verify the port is recognized as VXLAN */
		if (adapter->vxlan_port &&
		    udp_hdr(skb)->dest == adapter->vxlan_port)
			hdr.network = skb_inner_network_header(skb);

		if (adapter->geneve_port &&
		    udp_hdr(skb)->dest == adapter->geneve_port)
			hdr.network = skb_inner_network_header(skb);
	}

	/* Currently only IPv4/IPv6 with TCP is supported */
	switch (hdr.ipv4->version) {
	case IPVERSION:
		/* access ihl as u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
		l4_proto = hdr.ipv4->protocol;
		break;
	case 6:
		hlen = hdr.network - skb->data;
		l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL);
		hlen -= hdr.network - skb->data;
		break;
	default:
		return;
	}

	if (l4_proto != IPPROTO_TCP)
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* skip this packet since the socket is closing */
	if (th->fin)
		return;

	/* sample on all syn packets or once every atr sample count */
	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
		return;

	/* reset sample count */
	ring->atr_count = 0;

	vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);

	/*
	 * src and dst are inverted, think how the receiver sees them
	 *
	 * The input is broken into two sections, a non-compressed section
	 * containing vm_pool, vlan_id, and flow_type.  The rest of the data
	 * is XORed together and stored in the compressed dword.
	 */
	input.formatted.vlan_id = vlan_id;

	/*
	 * since src port and flex bytes occupy the same word XOR them together
	 * and write the value to source port portion of compressed dword
	 */
	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
		common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
	else
		common.port.src ^= th->dest ^ first->protocol;
	common.port.dst ^= th->source;

	switch (hdr.ipv4->version) {
	case IPVERSION:
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
		break;
	case 6:
		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
			     hdr.ipv6->saddr.s6_addr32[1] ^
			     hdr.ipv6->saddr.s6_addr32[2] ^
			     hdr.ipv6->saddr.s6_addr32[3] ^
			     hdr.ipv6->daddr.s6_addr32[0] ^
			     hdr.ipv6->daddr.s6_addr32[1] ^
			     hdr.ipv6->daddr.s6_addr32[2] ^
			     hdr.ipv6->daddr.s6_addr32[3];
		break;
	default:
		break;
	}

	if (hdr.network != skb_network_header(skb))
		input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK;

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
					      input, common, ring->queue_index);
}
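/* Queue selection: macvlan offload traffic is pinned to its reserved
 * queue range; FCoE/FIP frames are spread across the FCoE ring set when
 * FCoE is enabled; everything else uses the stack's fallback.
 */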
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv, select_queue_fallback_t fallback)
{
	struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
#ifdef IXGBE_FCOE
	struct ixgbe_adapter *adapter;
	struct ixgbe_ring_feature *f;
	int txq;
#endif

	if (fwd_adapter)
		return skb->queue_mapping + fwd_adapter->tx_base_queue;

#ifdef IXGBE_FCOE

	/*
	 * only execute the code below if protocol is FCoE
	 * or FIP and we have FCoE enabled on the adapter
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_FCOE):
	case htons(ETH_P_FIP):
		adapter = netdev_priv(dev);

		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
			break;
	default:
		return fallback(dev, skb);
	}

	f = &adapter->ring_feature[RING_F_FCOE];

	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
					   smp_processor_id();

	while (txq >= f->indices)
		txq -= f->indices;

	return txq + f->offset;
#else
	return fallback(dev, skb);
#endif
}
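/* Main transmit path for one ring: reserve descriptors, latch VLAN and
 * PTP timestamp flags, run TSO or checksum offload (FCoE has its own FSO
 * path), add an ATR sample if enabled, then hand off to ixgbe_tx_map().
 */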
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = skb->protocol;
	u8 hdr_len = 0;

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* if we have a HW VLAN tag being added default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			goto out_drop;

		tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
				  IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
	}
	protocol = vlan_get_protocol(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    adapter->ptp_clock &&
	    !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS,
				   &adapter->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

		/* schedule check for Tx timestamp */
		adapter->ptp_tx_skb = skb_get(skb);
		adapter->ptp_tx_start = jiffies;
		schedule_work(&adapter->ptp_tx_work);
	}

	skb_tx_timestamp(skb);

#ifdef CONFIG_PCI_IOV
	/*
	 * Use the l2switch_enable flag - would be false if the DMA
	 * Tx switch had been disabled.
	 */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		tx_flags |= IXGBE_TX_FLAGS_CC;

#endif
	/* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */
	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) ||
	     (skb->priority != TC_PRIO_CONTROL))) {
		tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
					IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;

			if (skb_cow_head(skb, 0))
				goto out_drop;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 IXGBE_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

#ifdef IXGBE_FCOE
	/* setup tx offload for FCoE */
	if ((protocol == htons(ETH_P_FCOE)) &&
	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
		tso = ixgbe_fso(tx_ring, first, &hdr_len);
		if (tso < 0)
			goto out_drop;

		goto xmit_fcoe;
	}

#endif /* IXGBE_FCOE */
	tso = ixgbe_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbe_tx_csum(tx_ring, first);

	/* add the ATR filter if ATR is on */
	if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
		ixgbe_atr(tx_ring, first);

#ifdef IXGBE_FCOE
xmit_fcoe:
#endif /* IXGBE_FCOE */
	ixgbe_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
				      struct net_device *netdev,
				      struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;

	/*
	 * The minimum packet size for olinfo paylen is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];

	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
}

static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	return __ixgbe_xmit_frame(skb, netdev, NULL);
}
/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_mac_set_default_filter(adapter);

	return 0;
}
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return ixgbe_ptp_set_ts_config(adapter, req);
	case SIOCGHWTSTAMP:
		return ixgbe_ptp_get_ts_config(adapter, req);
	default:
		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
	}
}
/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (is_valid_ether_addr(hw->mac.san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();

		/* update SAN MAC vmdq pool selection */
		hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	/* if interface is down do nothing */
	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return;

	/* loop through and schedule all active queues */
	for (i = 0; i < adapter->num_q_vectors; i++)
		ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
}

#endif
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes   += bytes;
		}
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin_irq(&ring->syncp);
				packets = ring->stats.packets;
				bytes   = ring->stats.bytes;
			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
			stats->tx_packets += packets;
			stats->tx_bytes   += bytes;
		}
	}
	rcu_read_unlock();

	/* following stats updated by ixgbe_watchdog_task() */
	stats->multicast	= netdev->stats.multicast;
	stats->rx_errors	= netdev->stats.rx_errors;
	stats->rx_length_errors	= netdev->stats.rx_length_errors;
	stats->rx_crc_errors	= netdev->stats.rx_crc_errors;
	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;

	return stats;
}
#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
 * @adapter: pointer to ixgbe_adapter
 * @tc: number of traffic classes currently enabled
 *
 * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
 * each 802.1Q priority maps to a packet buffer that exists.
 */
static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg, rsave;
	int i;

	/* The 82598 has a static priority-to-TC mapping that cannot
	 * be changed, so no validation is needed.
	 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	rsave = reg;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);

		/* If up2tc is out of bounds default to zero */
		if (up2tc > tc)
			reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
	}

	if (reg != rsave)
		IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	return;
}

/**
 * ixgbe_set_prio_tc_map - Configure netdev prio tc map
 * @adapter: Pointer to adapter struct
 *
 * Populate the netdev user priority to tc map
 */
static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
	struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
	u8 prio;

	for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
		u8 tc = 0;

		if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
			tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
		else if (ets)
			tc = ets->prio_tc[prio];

		netdev_set_prio_tc_map(dev, prio, tc);
	}
}

#endif /* CONFIG_IXGBE_DCB */
/**
 * ixgbe_setup_tc - configure net_device for multiple traffic classes
 *
 * @dev: net device to configure
 * @tc: number of traffic classes to enable
 */
int ixgbe_setup_tc(struct net_device *dev, u8 tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	bool pools;

	/* Hardware supports up to 8 traffic classes */
	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs)
		return -EINVAL;

	if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS)
		return -EINVAL;

	pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);
	if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS)
		return -EBUSY;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbe_close(dev);
	else
		ixgbe_reset(adapter);

	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_IXGBE_DCB
	if (tc) {
		netdev_set_num_tc(dev, tc);
		ixgbe_set_prio_tc_map(adapter);

		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;

		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
			adapter->hw.fc.requested_mode = ixgbe_fc_none;
		}
	} else {
		netdev_reset_tc(dev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	ixgbe_validate_rtr(adapter, tc);

#endif /* CONFIG_IXGBE_DCB */
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		return ixgbe_open(dev);

	return 0;
}
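/* Delete a cls_u32 knode: the low 20 bits of the handle give the filter
 * location, TC_U32_USERHTID() the hash table it lives in. Clear the slot
 * in any link's child map, tear down the whole child table if the deleted
 * filter was itself a link, then remove the hardware filter entry.
 */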
static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
			       struct tc_cls_u32_offload *cls)
{
	u32 hdl = cls->knode.handle;
	u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
	u32 loc = cls->knode.handle & 0xfffff;
	int err = 0, i, j;
	struct ixgbe_jump_table *jump = NULL;

	if (loc > IXGBE_MAX_HW_ENTRIES)
		return -EINVAL;

	if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
		return -EINVAL;

	/* Clear this filter in the link data it is associated with */
	if (uhtid != 0x800) {
		jump = adapter->jump_tables[uhtid];
		if (!jump)
			return -EINVAL;
		if (!test_bit(loc - 1, jump->child_loc_map))
			return -EINVAL;
		clear_bit(loc - 1, jump->child_loc_map);
	}

	/* Check if the filter being deleted is a link */
	for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
		jump = adapter->jump_tables[i];
		if (jump && jump->link_hdl == hdl) {
			/* Delete filters in the hardware in the child hash
			 * table associated with this link
			 */
			for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
				if (!test_bit(j, jump->child_loc_map))
					continue;
				spin_lock(&adapter->fdir_perfect_lock);
				err = ixgbe_update_ethtool_fdir_entry(adapter,
								      NULL,
								      j + 1);
				spin_unlock(&adapter->fdir_perfect_lock);
				clear_bit(j, jump->child_loc_map);
			}
			/* Remove resources for this link */
			kfree(jump->input);
			kfree(jump->mask);
			kfree(jump);
			adapter->jump_tables[i] = NULL;
			return err;
		}
	}

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
	spin_unlock(&adapter->fdir_perfect_lock);
	return err;
}
static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter,
					    __be16 protocol,
					    struct tc_cls_u32_offload *cls)
{
	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);

	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
		return -EINVAL;

	/* These ixgbe devices do not support hash tables at the moment,
	 * so abort when given one.
	 */
	if (cls->hnode.divisor > 0)
		return -EINVAL;

	set_bit(uhtid - 1, &adapter->tables);
	return 0;
}

static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
					    struct tc_cls_u32_offload *cls)
{
	u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);

	if (uhtid >= IXGBE_MAX_LINK_HANDLE)
		return -EINVAL;

	clear_bit(uhtid - 1, &adapter->tables);
	return 0;
}
#ifdef CONFIG_NET_CLS_ACT
static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
				  u8 *queue, u64 *action)
{
	unsigned int num_vfs = adapter->num_vfs, vf;
	struct net_device *upper;
	struct list_head *iter;

	/* redirect to an SR-IOV VF */
	for (vf = 0; vf < num_vfs; ++vf) {
		upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
		if (upper->ifindex == ifindex) {
			if (adapter->num_rx_pools > 1)
				*queue = vf * 2;
			else
				*queue = vf * adapter->num_rx_queues_per_pool;

			*action = vf + 1;
			*action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
			return 0;
		}
	}

	/* redirect to an offloaded macvlan netdev */
	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
		if (netif_is_macvlan(upper)) {
			struct macvlan_dev *dfwd = netdev_priv(upper);
			struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;

			if (vadapter && vadapter->netdev->ifindex == ifindex) {
				*queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
				*action = *queue;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static int parse_tc_actions(struct ixgbe_adapter *adapter,
			    struct tcf_exts *exts, u64 *action, u8 *queue)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {

		/* Drop action */
		if (is_tcf_gact_shot(a)) {
			*action = IXGBE_FDIR_DROP_QUEUE;
			*queue = IXGBE_FDIR_DROP_QUEUE;
			return 0;
		}

		/* Redirect to a VF or an offloaded macvlan */
		if (is_tcf_mirred_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);

			err = handle_redirect_action(adapter, ifindex, queue,
						     action);
			if (err == 0)
				return err;
		}
	}

	return -EINVAL;
}
#else
static int parse_tc_actions(struct ixgbe_adapter *adapter,
			    struct tcf_exts *exts, u64 *action, u8 *queue)
{
	return -EINVAL;
}
#endif /* CONFIG_NET_CLS_ACT */
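/* Walk the u32 match keys and translate each one through the per-header
 * field table into a flow-director input/mask pair. When a jump field
 * (nexthdr) is supplied, the keys must also contain the expected
 * offset/value/mask triple that advances the parser to the next header.
 */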
static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
				    union ixgbe_atr_input *mask,
				    struct tc_cls_u32_offload *cls,
				    struct ixgbe_mat_field *field_ptr,
				    struct ixgbe_nexthdr *nexthdr)
{
	int i, j, off;
	__be32 val, m;
	bool found_entry = false, found_jump_field = false;

	for (i = 0; i < cls->knode.sel->nkeys; i++) {
		off = cls->knode.sel->keys[i].off;
		val = cls->knode.sel->keys[i].val;
		m = cls->knode.sel->keys[i].mask;

		for (j = 0; field_ptr[j].val; j++) {
			if (field_ptr[j].off == off) {
				field_ptr[j].val(input, mask, val, m);
				input->filter.formatted.flow_type |=
					field_ptr[j].type;
				found_entry = true;
				break;
			}
		}
		if (nexthdr) {
			if (nexthdr->off == cls->knode.sel->keys[i].off &&
			    nexthdr->val == cls->knode.sel->keys[i].val &&
			    nexthdr->mask == cls->knode.sel->keys[i].mask)
				found_jump_field = true;
			else
				continue;
		}
	}

	if (nexthdr && !found_jump_field)
		return -EINVAL;

	if (!found_entry)
		return 0;

	mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				    IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	return 0;
}
static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
				  __be16 protocol,
				  struct tc_cls_u32_offload *cls)
{
	u32 loc = cls->knode.handle & 0xfffff;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_mat_field *field_ptr;
	struct ixgbe_fdir_filter *input = NULL;
	union ixgbe_atr_input *mask = NULL;
	struct ixgbe_jump_table *jump = NULL;
	int i, err = -EINVAL;
	u8 queue;
	u32 uhtid, link_uhtid;

	uhtid = TC_U32_USERHTID(cls->knode.handle);
	link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);

	/* At the moment cls_u32 jumps to network layer and skips past
	 * L2 headers. The canonical method to match L2 frames is to use
	 * negative values. However this is error prone at best, and really
	 * just broken, because there is no way to "know" what sort of hdr
	 * is in front of the network layer. Fix cls_u32 to support L2
	 * headers when needed.
	 */
	if (protocol != htons(ETH_P_IP))
		return err;

	if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return err;
	}

	/* cls u32 is a graph starting at root node 0x800. The driver tracks
	 * links and also the fields used to advance the parser across each
	 * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map
	 * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h.
	 * To add support for new nodes, update the ixgbe_model.h parse
	 * structures; this function _should_ be generic, so try not to
	 * hardcode values here.
	 */
	if (uhtid == 0x800) {
		field_ptr = (adapter->jump_tables[0])->mat;
	} else {
		if (uhtid >= IXGBE_MAX_LINK_HANDLE)
			return err;
		if (!adapter->jump_tables[uhtid])
			return err;
		field_ptr = (adapter->jump_tables[uhtid])->mat;
	}

	if (!field_ptr)
		return err;

	/* At this point we know the field_ptr is valid and need to either
	 * build a cls_u32 link or attach a filter. Adding a link to a
	 * handle that does not exist is invalid, and the same goes for
	 * adding rules to handles that don't exist.
	 */
	if (link_uhtid) {
		struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;

		if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
			return err;

		if (!test_bit(link_uhtid - 1, &adapter->tables))
			return err;

		/* Multiple filters as links to the same hash table are not
		 * supported. To add a new filter with the same next header
		 * but different match/jump conditions, create a new hash table
		 * and link to it.
		 */
		if (adapter->jump_tables[link_uhtid] &&
		    (adapter->jump_tables[link_uhtid])->link_hdl) {
			e_err(drv, "Link filter exists for link: %x\n",
			      link_uhtid);
			return err;
		}

		for (i = 0; nexthdr[i].jump; i++) {
			if (nexthdr[i].o != cls->knode.sel->offoff ||
			    nexthdr[i].s != cls->knode.sel->offshift ||
			    nexthdr[i].m != cls->knode.sel->offmask)
				return err;

			jump = kzalloc(sizeof(*jump), GFP_KERNEL);
			if (!jump)
				return -ENOMEM;
			input = kzalloc(sizeof(*input), GFP_KERNEL);
			if (!input) {
				err = -ENOMEM;
				goto free_jump;
			}
			mask = kzalloc(sizeof(*mask), GFP_KERNEL);
			if (!mask) {
				err = -ENOMEM;
				goto free_input;
			}
			jump->input = input;
			jump->mask = mask;
			jump->link_hdl = cls->knode.handle;

			err = ixgbe_clsu32_build_input(input, mask, cls,
						       field_ptr, &nexthdr[i]);
			if (!err) {
				jump->mat = nexthdr[i].jump;
				adapter->jump_tables[link_uhtid] = jump;
				break;
			}
		}
		return 0;
	}

	input = kzalloc(sizeof(*input), GFP_KERNEL);
	if (!input)
		return -ENOMEM;
	mask = kzalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOMEM;
		goto free_input;
	}

	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
		if ((adapter->jump_tables[uhtid])->input)
			memcpy(input, (adapter->jump_tables[uhtid])->input,
			       sizeof(*input));
		if ((adapter->jump_tables[uhtid])->mask)
			memcpy(mask, (adapter->jump_tables[uhtid])->mask,
			       sizeof(*mask));

		/* Lookup in all child hash tables if this location is already
		 * filled with a filter
		 */
		for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
			struct ixgbe_jump_table *link = adapter->jump_tables[i];

			if (link && (test_bit(loc - 1, link->child_loc_map))) {
				e_err(drv, "Filter exists in location: %x\n",
				      loc);
				err = -EINVAL;
				goto err_out;
			}
		}
	}
	err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
	if (err)
		goto err_out;

	err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
			       &queue);
	if (err < 0)
		goto err_out;

	input->sw_idx = loc;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, mask);
		if (err)
			goto err_out_w_lock;
	} else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
		err = -EINVAL;
		goto err_out_w_lock;
	}

	ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
	err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
						    input->sw_idx, queue);
	if (!err)
		ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
	spin_unlock(&adapter->fdir_perfect_lock);

	if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
		set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);

	kfree(mask);
	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(mask);
free_input:
	kfree(input);
free_jump:
	kfree(jump);
	return err;
}
static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			    struct tc_to_netdev *tc)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
	    tc->type == TC_SETUP_CLSU32) {
		switch (tc->cls_u32->command) {
		case TC_CLSU32_NEW_KNODE:
		case TC_CLSU32_REPLACE_KNODE:
			return ixgbe_configure_clsu32(adapter,
						      proto, tc->cls_u32);
		case TC_CLSU32_DELETE_KNODE:
			return ixgbe_delete_clsu32(adapter, tc->cls_u32);
		case TC_CLSU32_NEW_HNODE:
		case TC_CLSU32_REPLACE_HNODE:
			return ixgbe_configure_clsu32_add_hnode(adapter, proto,
								tc->cls_u32);
		case TC_CLSU32_DELETE_HNODE:
			return ixgbe_configure_clsu32_del_hnode(adapter,
								tc->cls_u32);
		default:
			return -EINVAL;
		}
	}

	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return ixgbe_setup_tc(dev, tc->tc);
}
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
	rtnl_unlock();
}

#endif
void ixgbe_do_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);
	else
		ixgbe_reset(adapter);
}
static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;

	/* Turn off LRO if not RSC capable */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
		features &= ~NETIF_F_LRO;

	return features;
}
static int ixgbe_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool need_reset = false;

	/* Make sure RSC matches LRO, reset if change */
	if (!(features & NETIF_F_LRO)) {
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
			need_reset = true;
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
	} else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		if (adapter->rx_itr_setting == 1 ||
		    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			need_reset = true;
		} else if ((changed ^ features) & NETIF_F_LRO) {
			e_info(probe, "rx-usecs set too low, "
			       "disabling RSC\n");
		}
	}

	/*
	 * Check if Flow Director n-tuple support or hw_tc support was
	 * enabled or disabled. If the state changed, we need to reset.
	 */
	if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) {
		/* turn off ATR, enable perfect filters and reset */
		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			need_reset = true;

		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
	} else {
		/* turn off perfect filters, enable ATR and reset */
		if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
			need_reset = true;

		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;

		/* We cannot enable ATR if SR-IOV is enabled */
		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED ||
		    /* We cannot enable ATR if we have 2 or more tcs */
		    (netdev_get_num_tc(netdev) > 1) ||
		    /* We cannot enable ATR if RSS is disabled */
		    (adapter->ring_feature[RING_F_RSS].limit <= 1) ||
		    /* A sample rate of 0 indicates ATR disabled */
		    (!adapter->atr_sample_rate))
			; /* do nothing not supported */
		else /* otherwise supported and set the flag */
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

	if (changed & NETIF_F_RXALL)
		need_reset = true;

	netdev->features = features;

	if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
		if (features & NETIF_F_RXCSUM) {
			adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
		} else {
			u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;

			ixgbe_clear_udp_tunnel_port(adapter, port_mask);
		}
	}

	if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
		if (features & NETIF_F_RXCSUM) {
			adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
		} else {
			u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;

			ixgbe_clear_udp_tunnel_port(adapter, port_mask);
		}
	}

	if (need_reset)
		ixgbe_do_reset(netdev);
	else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER))
		ixgbe_set_rx_mode(netdev);

	return 0;
}
/**
 * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
				      struct udp_tunnel_info *ti)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	__be16 port = ti->port;
	u32 port_shift = 0;
	u32 reg;

	if (ti->sa_family != AF_INET)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
			return;

		if (adapter->vxlan_port == port)
			return;

		if (adapter->vxlan_port) {
			netdev_info(dev,
				    "VXLAN port %d set, not adding port %d\n",
				    ntohs(adapter->vxlan_port),
				    ntohs(port));
			return;
		}

		adapter->vxlan_port = port;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
			return;

		if (adapter->geneve_port == port)
			return;

		if (adapter->geneve_port) {
			netdev_info(dev,
				    "GENEVE port %d set, not adding port %d\n",
				    ntohs(adapter->geneve_port),
				    ntohs(port));
			return;
		}

		port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
		adapter->geneve_port = port;
		break;
	default:
		return;
	}

	reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
	IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
}
/**
 * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
 * @dev: The port's netdev
 * @ti: Tunnel endpoint information
 **/
static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
				      struct udp_tunnel_info *ti)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	u32 port_mask;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
	    ti->type != UDP_TUNNEL_TYPE_GENEVE)
		return;

	if (ti->sa_family != AF_INET)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
			return;

		if (adapter->vxlan_port != ti->port) {
			netdev_info(dev, "VXLAN port %d not found\n",
				    ntohs(ti->port));
			return;
		}

		port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
			return;

		if (adapter->geneve_port != ti->port) {
			netdev_info(dev, "GENEVE port %d not found\n",
				    ntohs(ti->port));
			return;
		}

		port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
		break;
	default:
		return;
	}

	ixgbe_clear_udp_tunnel_port(adapter, port_mask);
	adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
}
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			     struct net_device *dev,
			     const unsigned char *addr, u16 vid,
			     u16 flags)
{
	/* guarantee we can provide a unique filter for the unicast address */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
		struct ixgbe_adapter *adapter = netdev_priv(dev);
		u16 pool = VMDQ_P(0);

		if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
			return -ENOMEM;
	}

	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
/**
 * ixgbe_configure_bridge_mode - set various bridge modes
 * @adapter: the private structure
 * @mode: requested bridge mode
 *
 * Configure some settings required for various bridge modes.
 **/
static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
				       __u16 mode)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int p, num_pools;
	u32 vmdctl;

	switch (mode) {
	case BRIDGE_MODE_VEPA:
		/* disable Tx loopback, rely on switch hairpin mode */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0);

		/* must enable Rx switching replication to allow multicast
		 * packet reception on all VFs, and to enable source address
		 * pruning.
		 */
		vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
		vmdctl |= IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);

		/* enable Rx source address pruning. Note, this requires
		 * replication to be enabled or else it does nothing.
		 */
		num_pools = adapter->num_vfs + adapter->num_rx_pools;
		for (p = 0; p < num_pools; p++) {
			if (hw->mac.ops.set_source_address_pruning)
				hw->mac.ops.set_source_address_pruning(hw,
								       true,
								       p);
		}
		break;
	case BRIDGE_MODE_VEB:
		/* enable Tx loopback for internal VF/PF communication */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC,
				IXGBE_PFDTXGSWC_VT_LBEN);

		/* disable Rx switching replication unless we have SR-IOV
		 * virtual functions
		 */
		vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
		if (!adapter->num_vfs)
			vmdctl &= ~IXGBE_VT_CTL_REPLEN;
		IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);

		/* disable Rx source address pruning, since we don't expect to
		 * be receiving external loopback of our transmitted frames.
		 */
		num_pools = adapter->num_vfs + adapter->num_rx_pools;
		for (p = 0; p < num_pools; p++) {
			if (hw->mac.ops.set_source_address_pruning)
				hw->mac.ops.set_source_address_pruning(hw,
								       false,
								       p);
		}
		break;
	default:
		return -EINVAL;
	}

	adapter->bridge_mode = mode;

	e_info(drv, "enabling bridge mode: %s\n",
	       mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return 0;
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
				    struct nlmsghdr *nlh, u16 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		int status;
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		status = ixgbe_configure_bridge_mode(adapter, mode);
		if (status)
			return status;

		break;
	}

	return 0;
}

static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				    struct net_device *dev,
				    u32 filter_mask, int nlflags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       adapter->bridge_mode, 0, 0, nlflags,
				       filter_mask, NULL);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
{
	struct ixgbe_fwd_adapter *fwd_adapter = NULL;
	struct ixgbe_adapter *adapter = netdev_priv(pdev);
	int used_pools = adapter->num_vfs + adapter->num_rx_pools;
	unsigned int limit;
	int pool, err;

	/* Hardware has a limited number of available pools. Each VF and the
	 * PF require a pool. Check to ensure we don't attempt to use more
	 * than the available number of pools.
	 */
	if (used_pools >= IXGBE_MAX_VF_FUNCTIONS)
		return ERR_PTR(-EINVAL);

#ifdef CONFIG_RPS
	if (vdev->num_rx_queues != vdev->num_tx_queues) {
		netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n",
			    vdev->name);
		return ERR_PTR(-EINVAL);
	}
#endif
	/* Check for hardware restriction on number of rx/tx queues */
	if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES ||
	    vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) {
		netdev_info(pdev,
			    "%s: Supports RX/TX Queue counts 1,2, and 4\n",
			    pdev->name);
		return ERR_PTR(-EINVAL);
	}

	if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	      adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) ||
	    (adapter->num_rx_pools > IXGBE_MAX_MACVLANS))
		return ERR_PTR(-EBUSY);

	fwd_adapter = kzalloc(sizeof(*fwd_adapter), GFP_KERNEL);
	if (!fwd_adapter)
		return ERR_PTR(-ENOMEM);

	pool = find_first_zero_bit(&adapter->fwd_bitmask, 32);
	adapter->num_rx_pools++;
	set_bit(pool, &adapter->fwd_bitmask);
	limit = find_last_bit(&adapter->fwd_bitmask, 32);

	/* Enable VMDq flag so device will be set in VM mode */
	adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED;
	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
	adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues;

	/* Force reinit of ring allocation with VMDQ enabled */
	err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));

	if (err)
		goto fwd_add_err;
	fwd_adapter->pool = pool;
	fwd_adapter->real_adapter = adapter;

	if (netif_running(pdev)) {
		err = ixgbe_fwd_ring_up(vdev, fwd_adapter);
		if (err)
			goto fwd_add_err;
		netif_tx_start_all_queues(vdev);
	}

	return fwd_adapter;
fwd_add_err:
	/* unwind counter and free adapter struct */
	netdev_info(pdev,
		    "%s: dfwd hardware acceleration failed\n", vdev->name);
	clear_bit(pool, &adapter->fwd_bitmask);
	adapter->num_rx_pools--;
	kfree(fwd_adapter);
	return ERR_PTR(err);
}
static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
{
	struct ixgbe_fwd_adapter *fwd_adapter = priv;
	struct ixgbe_adapter *adapter = fwd_adapter->real_adapter;
	unsigned int limit;

	clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask);
	adapter->num_rx_pools--;

	limit = find_last_bit(&adapter->fwd_bitmask, 32);
	adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
	ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
	ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
	netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
		   fwd_adapter->pool, adapter->num_rx_pools,
		   fwd_adapter->rx_base_queue,
		   fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool,
		   adapter->fwd_bitmask);
	kfree(fwd_adapter);
}
#define IXGBE_MAX_MAC_HDR_LEN		127
#define IXGBE_MAX_NETWORK_HDR_LEN	511
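/* The limits above follow from the Tx context descriptor layout: the
 * MACLEN and IPLEN fields of vlan_macip_lens are 7 and 9 bits wide, so
 * headers beyond 127/511 bytes cannot be described and the offloads that
 * would need a context descriptor must be stripped instead.
 */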
static netdev_features_t
ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
		     netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_set_tx_maxrate	= ixgbe_tx_maxrate,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
	.ndo_set_vf_mac		= ixgbe_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= ixgbe_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= ixgbe_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= ixgbe_ndo_set_vf_spoofchk,
	.ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en,
	.ndo_set_vf_trust	= ixgbe_ndo_set_vf_trust,
	.ndo_get_vf_config	= ixgbe_ndo_get_vf_config,
	.ndo_get_stats64	= ixgbe_get_stats64,
	.ndo_setup_tc		= __ixgbe_setup_tc,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
	.ndo_fcoe_get_wwn	= ixgbe_fcoe_get_wwn,
	.ndo_fcoe_get_hbainfo	= ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
	.ndo_set_features	= ixgbe_set_features,
	.ndo_fix_features	= ixgbe_fix_features,
	.ndo_fdb_add		= ixgbe_ndo_fdb_add,
	.ndo_bridge_setlink	= ixgbe_ndo_bridge_setlink,
	.ndo_bridge_getlink	= ixgbe_ndo_bridge_getlink,
	.ndo_dfwd_add_station	= ixgbe_fwd_add,
	.ndo_dfwd_del_station	= ixgbe_fwd_del,
	.ndo_udp_tunnel_add	= ixgbe_add_udp_tunnel_port,
	.ndo_udp_tunnel_del	= ixgbe_del_udp_tunnel_port,
	.ndo_features_check	= ixgbe_features_check,
};
/**
 * ixgbe_enumerate_functions - Get the number of ports this device has
 * @adapter: adapter structure
 *
 * This function enumerates the physical functions co-located on a single slot,
 * in order to determine how many ports a device has. This is most useful in
 * determining the required GT/s of PCIe bandwidth necessary for optimal
 * performance.
 **/
static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
{
	struct pci_dev *entry, *pdev = adapter->pdev;
	int physfns = 0;

	/* Some cards can not use the generic count PCIe functions method,
	 * because they are behind a parent switch, so we hardcode these with
	 * the correct number of functions.
	 */
	if (ixgbe_pcie_from_parent(&adapter->hw))
		physfns = 4;

	list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
		/* don't count virtual functions */
		if (entry->is_virtfn)
			continue;

		/* When the devices on the bus don't all match our device ID,
		 * we can't reliably determine the correct number of
		 * functions. This can occur if a function has been direct
		 * attached to a virtual machine using VT-d, for example. In
		 * this case, simply return -1 to indicate this.
		 */
		if ((entry->vendor != pdev->vendor) ||
		    (entry->device != pdev->device))
			return -1;

		physfns++;
	}

	return physfns;
}
/**
 * ixgbe_wol_supported - Check whether device supports WoL
 * @adapter: the adapter private structure
 * @device_id: the device ID
 * @subdevice_id: the subsystem device ID
 *
 * This function is used by probe and ethtool to determine
 * which devices have WoL support
 *
 **/
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

	/* WOL not supported on 82598 */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return false;

	/* check eeprom to see if WOL is enabled for X540 and newer */
	if (hw->mac.type >= ixgbe_mac_X540) {
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0)))
			return true;
	}

	/* WOL is determined based on device IDs for 82599 MACs */
	switch (device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices could support WOL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
		case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
		case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
		case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
			/* only support first port */
			if (hw->bus.func != 0)
				break;
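			/* fall through: port 0 of these parts also
			 * supports WOL, same as the subdevices below
			 */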
		case IXGBE_SUBDEV_ID_82599_SP_560FLR:
		case IXGBE_SUBDEV_ID_82599_SFP:
		case IXGBE_SUBDEV_ID_82599_RNDC:
		case IXGBE_SUBDEV_ID_82599_ECNA_DP:
		case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
		case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
			return true;
		}
		break;
	case IXGBE_DEV_ID_82599EN_SFP:
		/* Only these subdevices support WOL */
		switch (subdevice_id) {
		case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
			return true;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
			return true;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		return true;
	default:
		break;
	}

	return false;
}
/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	int i, err, pci_using_dac, expected_gts;
	unsigned int indices = MAX_TX_QUEUES;
	u8 part_str[IXGBE_PBANUM_LENGTH];
	bool disable_dev = false;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 eec;

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}
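	/* pci_using_dac is consulted further down: only when the device can
	 * address 64 bits is NETIF_F_HIGHDMA advertised to the stack.
	 */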
	err = pci_request_mem_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
		/* 8 TC w/ 4 queues per TC */
		indices = 4 * MAX_TRAFFIC_CLASS;
#else
		indices = IXGBE_MAX_RSS_INDICES;
#endif
	}

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));

	/* Setup hw api */
	hw->mac.ops   = *ii->mac_ops;
	hw->mac.type  = ii->mac;
	hw->mvals     = ii->mvals;

	/* EEPROM */
	hw->eeprom.ops = *ii->eeprom_ops;
	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_ioremap;
	}
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & BIT(8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	hw->phy.ops = *ii->phy_ops;
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* Make sure the SWFW semaphore is in a valid state */
	if (hw->mac.ops.init_swfw_sync)
		hw->mac.ops.init_swfw_sync(hw);

	/* Make it possible for the adapter to be woken up via WOL */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		break;
	default:
		break;
	}

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			e_crit(probe, "Fan has stopped, replace the adapter\n");
	}

	if (allow_unsupported_sfp)
		hw->allow_unsupported_sfp = allow_unsupported_sfp;

	/* reset_hw fills in the perm_addr as well */
	hw->phy.reset_if_overtemp = true;
	err = hw->mac.ops.reset_hw(hw);
	hw->phy.reset_if_overtemp = false;
	if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
		e_dev_err("Reload the driver after installing a supported module.\n");
		goto err_sw_init;
	} else if (err) {
		e_dev_err("HW Init failed: %d\n", err);
		goto err_sw_init;
	}

#ifdef CONFIG_PCI_IOV
	/* SR-IOV not supported on the 82598 */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		goto skip_sriov;
	/* Mailbox */
	ixgbe_init_mbx_params_pf(hw);
	hw->mbx.ops = ii->mbx_ops;
	pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
	ixgbe_enable_sriov(adapter);
skip_sriov:

#endif
	netdev->features = NETIF_F_SG |
			   NETIF_F_TSO |
			   NETIF_F_TSO6 |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM |
			   NETIF_F_HW_CSUM;
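	/* These tunnel GSO types are advertised via GSO_PARTIAL: in essence,
	 * the stack prepares the outer headers in advance so the hardware
	 * only needs to segment and checksum the inner portion of the frame.
	 */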
#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				    NETIF_F_GSO_GRE_CSUM | \
				    NETIF_F_GSO_IPXIP4 | \
				    NETIF_F_GSO_IPXIP6 | \
				    NETIF_F_GSO_UDP_TUNNEL | \
				    NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL |
			    IXGBE_GSO_PARTIAL_FEATURES;

	if (hw->mac.type >= ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CRC;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features |
			       NETIF_F_HW_VLAN_CTAG_FILTER |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_RXALL |
			       NETIF_F_HW_L2FW_DOFFLOAD;

	if (hw->mac.type >= ixgbe_mac_82599EB)
		netdev->hw_features |= NETIF_F_NTUPLE |
				       NETIF_F_HW_TC;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->hw_enc_features |= netdev->vlan_features;
	netdev->mpls_features |= NETIF_F_HW_CSUM;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
		netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		unsigned int fcoe_l;

		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}

		fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
		adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;

		netdev->features |= NETIF_F_FSO |
				    NETIF_F_FCOE_CRC;

		netdev->vlan_features |= NETIF_F_FSO |
					 NETIF_F_FCOE_CRC |
					 NETIF_F_FCOE_MTU;
	}
#endif /* IXGBE_FCOE */
	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
		netdev->hw_features |= NETIF_F_LRO;
	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		e_dev_err("The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_sw_init;
	}

	eth_platform_get_mac_address(&adapter->pdev->dev,
				     adapter->hw.mac.perm_addr);

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		e_dev_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	/* Set hw->mac.addr to permanent MAC address */
	ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
	ixgbe_mac_set_default_filter(adapter);

	setup_timer(&adapter->service_timer, &ixgbe_service_timer,
		    (unsigned long) adapter);

	if (ixgbe_removed(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
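	/* The service task is this driver's deferred-work context: slow-path
	 * jobs such as resets, the watchdog, and SFP detection are scheduled
	 * onto it from timers and interrupt handlers.
	 */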
	INIT_WORK(&adapter->service_task, ixgbe_service_task);
	set_bit(__IXGBE_SERVICE_INITED, &adapter->state);
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* WOL not supported for all devices */
	adapter->wol = 0;
	hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
	hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device,
					      pdev->subsystem_device);
	if (hw->wol_enabled)
		adapter->wol = IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
	hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);

	/* pick up the PCI bus settings for reporting later */
	if (ixgbe_pcie_from_parent(hw))
		ixgbe_get_parent_bus_info(adapter);
	else
		hw->mac.ops.get_bus_info(hw);

	/* calculate the expected PCIe bandwidth required for optimal
	 * performance. Note that some older parts will never have enough
	 * bandwidth due to being older generation PCIe parts. We clamp these
	 * parts to ensure no warning is displayed if it can't be fixed.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
		break;
	default:
		expected_gts = ixgbe_enumerate_functions(adapter) * 10;
		break;
	}

	/* don't check link if we failed to enumerate functions */
	if (expected_gts > 0)
		ixgbe_check_minimum_link(adapter, expected_gts);

	err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
	if (err)
		strlcpy(part_str, "Unknown", sizeof(part_str));
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
			   part_str);
	else
		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
			   hw->mac.type, hw->phy.type, part_str);

	e_dev_info("%pM\n", netdev->dev_addr);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);
	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		e_dev_warn("This device is a pre-production adapter/LOM. "
			   "Please be aware there may be issues associated "
			   "with your hardware. If you are experiencing "
			   "problems please contact your Intel or hardware "
			   "representative who provided you with this "
			   "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	pci_set_drvdata(pdev, adapter);

	/* power down the optics for 82599 SFP+ fiber */
	if (hw->mac.ops.disable_tx_laser)
		hw->mac.ops.disable_tx_laser(hw);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs);
		for (i = 0; i < adapter->num_vfs; i++)
			ixgbe_vf_configuration(pdev, (i | 0x10000000));
	}

	/* firmware requires the driver version to be 0xFFFFFFFF
	 * since the OS does not support this feature
	 */
	if (hw->mac.ops.set_fw_drv_ver)
		hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
					   0xFF);

	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	e_dev_info("%s\n", ixgbe_default_device_descr);

#ifdef CONFIG_IXGBE_HWMON
	if (ixgbe_sysfs_init(adapter))
		e_err(probe, "failed to allocate sysfs resources\n");
#endif /* CONFIG_IXGBE_HWMON */

	ixgbe_dbg_adapter_init(adapter);

	/* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
	if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw,
			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
			true);

	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbe_disable_sriov(adapter);
	adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
	iounmap(adapter->io_addr);
	kfree(adapter->jump_tables[0]);
	kfree(adapter->mac_table);
err_ioremap:
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	if (!adapter || disable_dev)
		pci_disable_device(pdev);
	return err;
}
/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbe_remove(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev;
	bool disable_dev;
	int i;

	/* if !adapter then we already cleaned up in probe */
	if (!adapter)
		return;

	netdev = adapter->netdev;
	ixgbe_dbg_adapter_exit(adapter);

	set_bit(__IXGBE_REMOVING, &adapter->state);
	cancel_work_sync(&adapter->service_task);

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL,
				IXGBE_DCA_CTRL_DCA_DISABLE);
	}
#endif
#ifdef CONFIG_IXGBE_HWMON
	ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

#ifdef CONFIG_PCI_IOV
	ixgbe_disable_sriov(adapter);
#endif
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

#ifdef CONFIG_DCB
	kfree(adapter->ixgbe_ieee_pfc);
	kfree(adapter->ixgbe_ieee_ets);
#endif
	iounmap(adapter->io_addr);
	pci_release_mem_regions(pdev);

	e_dev_info("complete\n");

	for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
		if (adapter->jump_tables[i]) {
			kfree(adapter->jump_tables[i]->input);
			kfree(adapter->jump_tables[i]->mask);
		}
		kfree(adapter->jump_tables[i]);
	}

	kfree(adapter->mac_table);
	disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *bdev, *vfdev;
	u32 dw0, dw1, dw2, dw3;
	int vf, pos;
	u16 req_id, pf_func;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB ||
	    adapter->num_vfs == 0)
		goto skip_bad_vf_detection;

	bdev = pdev->bus->self;
	while (bdev && (pci_pcie_type(bdev) != PCI_EXP_TYPE_ROOT_PORT))
		bdev = bdev->bus->self;

	if (!bdev)
		goto skip_bad_vf_detection;

	pos = pci_find_ext_capability(bdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		goto skip_bad_vf_detection;

	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
	if (ixgbe_removed(hw->hw_addr))
		goto skip_bad_vf_detection;

	req_id = dw1 >> 16;
	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
	if (!(req_id & 0x0080))
		goto skip_bad_vf_detection;
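	/* Bit 0 of the requester ID selects which physical function the VF
	 * belongs to on these dual-port parts; only act on VFs owned by
	 * this PF.
	 */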
	pf_func = req_id & 0x01;
	if ((pf_func & 1) == (pdev->devfn & 1)) {
		unsigned int device_id;

		vf = (req_id & 0x7F) >> 1;
		e_dev_err("VF %d has caused a PCIe error\n", vf);
		e_dev_err("TLP: dw0: %8.8x\tdw1: %8.8x\tdw2: %8.8x\tdw3: %8.8x\n",
			  dw0, dw1, dw2, dw3);
		switch (adapter->hw.mac.type) {
		case ixgbe_mac_82599EB:
			device_id = IXGBE_82599_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X540:
			device_id = IXGBE_X540_VF_DEVICE_ID;
			break;
		case ixgbe_mac_X550:
			device_id = IXGBE_DEV_ID_X550_VF;
			break;
		case ixgbe_mac_X550EM_x:
			device_id = IXGBE_DEV_ID_X550EM_X_VF;
			break;
		case ixgbe_mac_x550em_a:
			device_id = IXGBE_DEV_ID_X550EM_A_VF;
			break;
		default:
			device_id = 0;
			break;
		}

		/* Find the pci device of the offending VF */
		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
		while (vfdev) {
			if (vfdev->devfn == (req_id & 0xFF))
				break;
			vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					       device_id, vfdev);
		}
		/*
		 * There's a slim chance the VF could have been hot plugged,
		 * so if it is no longer present we don't need to issue the
		 * VFLR.  Just clean up the AER in that case.
		 */
		if (vfdev) {
			ixgbe_issue_vf_flr(adapter, vfdev);
			/* Free device reference count */
			pci_dev_put(vfdev);
		}

		pci_cleanup_aer_uncorrect_error_status(pdev);
	}

	/*
	 * Even though the error may have occurred on the other port
	 * we still need to increment the vf error reference count for
	 * both ports because the I/O resume function will be called
	 * for both of them.
	 */
	adapter->vferr_refcount++;

	return PCI_ERS_RESULT_RECOVERED;

skip_bad_vf_detection:
#endif /* CONFIG_PCI_IOV */
	if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbe_close_suspend(adapter);

	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		e_err(probe, "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
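		/* flush memory so the __IXGBE_DISABLED clear below is
		 * ordered after the device re-enable above
		 */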
		smp_mb__before_atomic();
		clear_bit(__IXGBE_DISABLED, &adapter->state);
		adapter->hw.hw_addr = adapter->io_addr;
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		e_dev_err("pci_cleanup_aer_uncorrect_error_status "
			  "failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

#ifdef CONFIG_PCI_IOV
	if (adapter->vferr_refcount) {
		e_info(drv, "Resuming after VF err\n");
		adapter->vferr_refcount--;
		return;
	}

#endif
	rtnl_lock();
	if (netif_running(netdev))
		ixgbe_open(netdev);

	netif_device_attach(netdev);
	rtnl_unlock();
}
static const struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = ixgbe_remove,
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.sriov_configure = ixgbe_pci_sriov_configure,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
	pr_info("%s\n", ixgbe_copyright);

	ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
	if (!ixgbe_wq) {
		pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
		return -ENOMEM;
	}

	ixgbe_dbg_init();

	ret = pci_register_driver(&ixgbe_driver);
	if (ret) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_dbg_exit();
		return ret;
	}

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	return 0;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);

	ixgbe_dbg_exit();
	if (ixgbe_wq) {
		destroy_workqueue(ixgbe_wq);
		ixgbe_wq = NULL;
	}
}

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */