bnx2x_main.c

/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
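
/* The storm_memset_*() helpers below write driver-owned values and
 * structures into the STORM processors' internal memory through the
 * BAR_*STRORM_INTMEM windows, at addresses built from the
 * firmware-defined *_OFFSET() macros.
 */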
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);
	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);
	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
		   XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
					struct stats_indication_flags *flags,
					u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
					struct stats_indication_flags *flags,
					u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
					struct stats_indication_flags *flags,
					u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
					struct stats_indication_flags *flags,
					u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

	REG_WR16(bp, addr, eq_prod);
}
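
/* Set the host-coalescing timeout (in ticks) for one index of a status
 * block; the index_data layout differs between E2 and E1x chips.
 */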
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		   index_offset +
		   sizeof(struct hc_index_data)*sb_index +
		   offsetof(struct hc_index_data, timeout);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
		   index_offset +
		   sizeof(struct hc_index_data)*sb_index +
		   offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"
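
/* Dump a DMAE command (source, destination, length and completion
 * parameters) to the debug log, formatted according to the source and
 * destination types encoded in the opcode.
 */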
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}
  419. const u32 dmae_reg_go_c[] = {
  420. DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
  421. DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
  422. DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
  423. DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
  424. };
  425. /* copy command into DMAE command memory and set DMAE command go */
  426. void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
  427. {
  428. u32 cmd_offset;
  429. int i;
  430. cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
  431. for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
  432. REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
  433. DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
  434. idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
  435. }
  436. REG_WR(bp, dmae_reg_go_c[idx], 1);
  437. }
  438. u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
  439. {
  440. return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
  441. DMAE_CMD_C_ENABLE);
  442. }
  443. u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
  444. {
  445. return opcode & ~DMAE_CMD_SRC_RESET;
  446. }
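/*
 * Build a DMAE opcode word from the source/destination types, port, VN and
 * endianness, optionally adding a completion request (see
 * bnx2x_dmae_opcode_add_comp() above).
 */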
  447. u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
  448. bool with_comp, u8 comp_type)
  449. {
  450. u32 opcode = 0;
  451. opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
  452. (dst_type << DMAE_COMMAND_DST_SHIFT));
  453. opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
  454. opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
  455. opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
  456. (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
  457. opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
  458. #ifdef __BIG_ENDIAN
  459. opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
  460. #else
  461. opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
  462. #endif
  463. if (with_comp)
  464. opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
  465. return opcode;
  466. }
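/*
 * Prepare a DMAE command whose completion is reported by writing
 * DMAE_COMP_VAL into the slowpath wb_comp word, which the issuer then polls.
 */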
  467. static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
  468. struct dmae_command *dmae,
  469. u8 src_type, u8 dst_type)
  470. {
  471. memset(dmae, 0, sizeof(struct dmae_command));
  472. /* set the opcode */
  473. dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
  474. true, DMAE_COMP_PCI);
  475. /* fill in the completion parameters */
  476. dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
  477. dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
  478. dmae->comp_val = DMAE_COMP_VAL;
  479. }
480. /* issue a dmae command over the init-channel and wait for completion */
  481. static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
  482. struct dmae_command *dmae)
  483. {
  484. u32 *wb_comp = bnx2x_sp(bp, wb_comp);
  485. int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
  486. int rc = 0;
  487. DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
  488. bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
  489. bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  490. /* lock the dmae channel */
  491. spin_lock_bh(&bp->dmae_lock);
  492. /* reset completion */
  493. *wb_comp = 0;
  494. /* post the command on the channel used for initializations */
  495. bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
  496. /* wait for completion */
  497. udelay(5);
  498. while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
  499. DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
  500. if (!cnt) {
  501. BNX2X_ERR("DMAE timeout!\n");
  502. rc = DMAE_TIMEOUT;
  503. goto unlock;
  504. }
  505. cnt--;
  506. udelay(50);
  507. }
  508. if (*wb_comp & DMAE_PCI_ERR_FLAG) {
  509. BNX2X_ERR("DMAE PCI error!\n");
  510. rc = DMAE_PCI_ERROR;
  511. }
  512. DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
  513. bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
  514. bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
  515. unlock:
  516. spin_unlock_bh(&bp->dmae_lock);
  517. return rc;
  518. }
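/*
 * Copy len32 dwords from host memory (dma_addr) into GRC address dst_addr.
 * When the DMAE block is not ready yet, fall back to indirect register
 * writes using the slowpath wb_data buffer. Illustrative call (not made
 * from here):
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_addr, 2);
 */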
  519. void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
  520. u32 len32)
  521. {
  522. struct dmae_command dmae;
  523. if (!bp->dmae_ready) {
  524. u32 *data = bnx2x_sp(bp, wb_data[0]);
  525. DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
  526. " using indirect\n", dst_addr, len32);
  527. bnx2x_init_ind_wr(bp, dst_addr, data, len32);
  528. return;
  529. }
  530. /* set opcode and fixed command fields */
  531. bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
  532. /* fill in addresses and len */
  533. dmae.src_addr_lo = U64_LO(dma_addr);
  534. dmae.src_addr_hi = U64_HI(dma_addr);
  535. dmae.dst_addr_lo = dst_addr >> 2;
  536. dmae.dst_addr_hi = 0;
  537. dmae.len = len32;
  538. bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
  539. /* issue the command and wait for completion */
  540. bnx2x_issue_dmae_with_comp(bp, &dmae);
  541. }
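/*
 * Read len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer, using indirect register reads when DMAE is not ready.
 */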
  542. void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
  543. {
  544. struct dmae_command dmae;
  545. if (!bp->dmae_ready) {
  546. u32 *data = bnx2x_sp(bp, wb_data[0]);
  547. int i;
  548. DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
  549. " using indirect\n", src_addr, len32);
  550. for (i = 0; i < len32; i++)
  551. data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
  552. return;
  553. }
  554. /* set opcode and fixed command fields */
  555. bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
  556. /* fill in addresses and len */
  557. dmae.src_addr_lo = src_addr >> 2;
  558. dmae.src_addr_hi = 0;
  559. dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
  560. dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
  561. dmae.len = len32;
  562. bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
  563. /* issue the command and wait for completion */
  564. bnx2x_issue_dmae_with_comp(bp, &dmae);
  565. }
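/*
 * Write a buffer larger than the DMAE length limit by splitting it into
 * DMAE_LEN32_WR_MAX(bp)-dword chunks, each issued via bnx2x_write_dmae().
 */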
  566. static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
  567. u32 addr, u32 len)
  568. {
  569. int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
  570. int offset = 0;
  571. while (len > dmae_wr_max) {
  572. bnx2x_write_dmae(bp, phys_addr + offset,
  573. addr + offset, dmae_wr_max);
  574. offset += dmae_wr_max * 4;
  575. len -= dmae_wr_max;
  576. }
  577. bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
  578. }
  579. /* used only for slowpath so not inlined */
  580. static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
  581. {
  582. u32 wb_write[2];
  583. wb_write[0] = val_hi;
  584. wb_write[1] = val_lo;
  585. REG_WR_DMAE(bp, reg, wb_write, 2);
  586. }
  587. #ifdef USE_WB_RD
  588. static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
  589. {
  590. u32 wb_data[2];
  591. REG_RD_DMAE(bp, reg, wb_data, 2);
  592. return HILO_U64(wb_data[0], wb_data[1]);
  593. }
  594. #endif
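/*
 * Scan the XSTORM/TSTORM/CSTORM/USTORM assert lists and print every valid
 * entry; the return value is the number of asserts found.
 */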
  595. static int bnx2x_mc_assert(struct bnx2x *bp)
  596. {
  597. char last_idx;
  598. int i, rc = 0;
  599. u32 row0, row1, row2, row3;
  600. /* XSTORM */
  601. last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
  602. XSTORM_ASSERT_LIST_INDEX_OFFSET);
  603. if (last_idx)
  604. BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  605. /* print the asserts */
  606. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  607. row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  608. XSTORM_ASSERT_LIST_OFFSET(i));
  609. row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  610. XSTORM_ASSERT_LIST_OFFSET(i) + 4);
  611. row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  612. XSTORM_ASSERT_LIST_OFFSET(i) + 8);
  613. row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
  614. XSTORM_ASSERT_LIST_OFFSET(i) + 12);
  615. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  616. BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  617. " 0x%08x 0x%08x 0x%08x\n",
  618. i, row3, row2, row1, row0);
  619. rc++;
  620. } else {
  621. break;
  622. }
  623. }
  624. /* TSTORM */
  625. last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
  626. TSTORM_ASSERT_LIST_INDEX_OFFSET);
  627. if (last_idx)
  628. BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  629. /* print the asserts */
  630. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  631. row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  632. TSTORM_ASSERT_LIST_OFFSET(i));
  633. row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  634. TSTORM_ASSERT_LIST_OFFSET(i) + 4);
  635. row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  636. TSTORM_ASSERT_LIST_OFFSET(i) + 8);
  637. row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
  638. TSTORM_ASSERT_LIST_OFFSET(i) + 12);
  639. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  640. BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  641. " 0x%08x 0x%08x 0x%08x\n",
  642. i, row3, row2, row1, row0);
  643. rc++;
  644. } else {
  645. break;
  646. }
  647. }
  648. /* CSTORM */
  649. last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
  650. CSTORM_ASSERT_LIST_INDEX_OFFSET);
  651. if (last_idx)
  652. BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  653. /* print the asserts */
  654. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  655. row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  656. CSTORM_ASSERT_LIST_OFFSET(i));
  657. row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  658. CSTORM_ASSERT_LIST_OFFSET(i) + 4);
  659. row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  660. CSTORM_ASSERT_LIST_OFFSET(i) + 8);
  661. row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
  662. CSTORM_ASSERT_LIST_OFFSET(i) + 12);
  663. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  664. BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
  665. " 0x%08x 0x%08x 0x%08x\n",
  666. i, row3, row2, row1, row0);
  667. rc++;
  668. } else {
  669. break;
  670. }
  671. }
  672. /* USTORM */
  673. last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
  674. USTORM_ASSERT_LIST_INDEX_OFFSET);
  675. if (last_idx)
  676. BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
  677. /* print the asserts */
  678. for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
  679. row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
  680. USTORM_ASSERT_LIST_OFFSET(i));
  681. row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
  682. USTORM_ASSERT_LIST_OFFSET(i) + 4);
  683. row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
  684. USTORM_ASSERT_LIST_OFFSET(i) + 8);
  685. row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
  686. USTORM_ASSERT_LIST_OFFSET(i) + 12);
  687. if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
  688. BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
  689. " 0x%08x 0x%08x 0x%08x\n",
  690. i, row3, row2, row1, row0);
  691. rc++;
  692. } else {
  693. break;
  694. }
  695. }
  696. return rc;
  697. }
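/*
 * Dump what appears to be the MCP trace buffer (addressed relative to the
 * shmem base) to the kernel log; does nothing when no MCP is present.
 */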
  698. static void bnx2x_fw_dump(struct bnx2x *bp)
  699. {
  700. u32 addr;
  701. u32 mark, offset;
  702. __be32 data[9];
  703. int word;
  704. u32 trace_shmem_base;
  705. if (BP_NOMCP(bp)) {
  706. BNX2X_ERR("NO MCP - can not dump\n");
  707. return;
  708. }
  709. if (BP_PATH(bp) == 0)
  710. trace_shmem_base = bp->common.shmem_base;
  711. else
  712. trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
  713. addr = trace_shmem_base - 0x0800 + 4;
  714. mark = REG_RD(bp, addr);
  715. mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
  716. + ((mark + 0x3) & ~0x3) - 0x08000000;
  717. pr_err("begin fw dump (mark 0x%x)\n", mark);
  718. pr_err("");
  719. for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
  720. for (word = 0; word < 8; word++)
  721. data[word] = htonl(REG_RD(bp, offset + 4*word));
  722. data[8] = 0x0;
  723. pr_cont("%s", (char *)data);
  724. }
  725. for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
  726. for (word = 0; word < 8; word++)
  727. data[word] = htonl(REG_RD(bp, offset + 4*word));
  728. data[8] = 0x0;
  729. pr_cont("%s", (char *)data);
  730. }
  731. pr_err("end of fw dump\n");
  732. }
  733. void bnx2x_panic_dump(struct bnx2x *bp)
  734. {
  735. int i;
  736. u16 j;
  737. struct hc_sp_status_block_data sp_sb_data;
  738. int func = BP_FUNC(bp);
  739. #ifdef BNX2X_STOP_ON_ERROR
  740. u16 start = 0, end = 0;
  741. #endif
  742. bp->stats_state = STATS_STATE_DISABLED;
  743. DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
  744. BNX2X_ERR("begin crash dump -----------------\n");
  745. /* Indices */
  746. /* Common */
  747. BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
  748. " spq_prod_idx(0x%x)\n",
  749. bp->def_idx, bp->def_att_idx,
  750. bp->attn_state, bp->spq_prod_idx);
  751. BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
  752. bp->def_status_blk->atten_status_block.attn_bits,
  753. bp->def_status_blk->atten_status_block.attn_bits_ack,
  754. bp->def_status_blk->atten_status_block.status_block_id,
  755. bp->def_status_blk->atten_status_block.attn_bits_index);
  756. BNX2X_ERR(" def (");
  757. for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
  758. pr_cont("0x%x%s",
  759. bp->def_status_blk->sp_sb.index_values[i],
  760. (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
  761. for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
  762. *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
  763. CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
  764. i*sizeof(u32));
  765. pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
  766. "pf_id(0x%x) vnic_id(0x%x) "
  767. "vf_id(0x%x) vf_valid (0x%x)\n",
  768. sp_sb_data.igu_sb_id,
  769. sp_sb_data.igu_seg_id,
  770. sp_sb_data.p_func.pf_id,
  771. sp_sb_data.p_func.vnic_id,
  772. sp_sb_data.p_func.vf_id,
  773. sp_sb_data.p_func.vf_valid);
  774. for_each_eth_queue(bp, i) {
  775. struct bnx2x_fastpath *fp = &bp->fp[i];
  776. int loop;
  777. struct hc_status_block_data_e2 sb_data_e2;
  778. struct hc_status_block_data_e1x sb_data_e1x;
  779. struct hc_status_block_sm *hc_sm_p =
  780. CHIP_IS_E2(bp) ?
  781. sb_data_e2.common.state_machine :
  782. sb_data_e1x.common.state_machine;
  783. struct hc_index_data *hc_index_p =
  784. CHIP_IS_E2(bp) ?
  785. sb_data_e2.index_data :
  786. sb_data_e1x.index_data;
  787. int data_size;
  788. u32 *sb_data_p;
  789. /* Rx */
  790. BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
  791. " rx_comp_prod(0x%x)"
  792. " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
  793. i, fp->rx_bd_prod, fp->rx_bd_cons,
  794. fp->rx_comp_prod,
  795. fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
  796. BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
  797. " fp_hc_idx(0x%x)\n",
  798. fp->rx_sge_prod, fp->last_max_sge,
  799. le16_to_cpu(fp->fp_hc_idx));
  800. /* Tx */
  801. BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
  802. " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
  803. " *tx_cons_sb(0x%x)\n",
  804. i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
  805. fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
  806. loop = CHIP_IS_E2(bp) ?
  807. HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
  808. /* host sb data */
  809. #ifdef BCM_CNIC
  810. if (IS_FCOE_FP(fp))
  811. continue;
  812. #endif
  813. BNX2X_ERR(" run indexes (");
  814. for (j = 0; j < HC_SB_MAX_SM; j++)
  815. pr_cont("0x%x%s",
  816. fp->sb_running_index[j],
  817. (j == HC_SB_MAX_SM - 1) ? ")" : " ");
  818. BNX2X_ERR(" indexes (");
  819. for (j = 0; j < loop; j++)
  820. pr_cont("0x%x%s",
  821. fp->sb_index_values[j],
  822. (j == loop - 1) ? ")" : " ");
  823. /* fw sb data */
  824. data_size = CHIP_IS_E2(bp) ?
  825. sizeof(struct hc_status_block_data_e2) :
  826. sizeof(struct hc_status_block_data_e1x);
  827. data_size /= sizeof(u32);
  828. sb_data_p = CHIP_IS_E2(bp) ?
  829. (u32 *)&sb_data_e2 :
  830. (u32 *)&sb_data_e1x;
  831. /* copy sb data in here */
  832. for (j = 0; j < data_size; j++)
  833. *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
  834. CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
  835. j * sizeof(u32));
  836. if (CHIP_IS_E2(bp)) {
  837. pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
  838. "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
  839. sb_data_e2.common.p_func.pf_id,
  840. sb_data_e2.common.p_func.vf_id,
  841. sb_data_e2.common.p_func.vf_valid,
  842. sb_data_e2.common.p_func.vnic_id,
  843. sb_data_e2.common.same_igu_sb_1b);
  844. } else {
  845. pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
  846. "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
  847. sb_data_e1x.common.p_func.pf_id,
  848. sb_data_e1x.common.p_func.vf_id,
  849. sb_data_e1x.common.p_func.vf_valid,
  850. sb_data_e1x.common.p_func.vnic_id,
  851. sb_data_e1x.common.same_igu_sb_1b);
  852. }
  853. /* SB_SMs data */
  854. for (j = 0; j < HC_SB_MAX_SM; j++) {
  855. pr_cont("SM[%d] __flags (0x%x) "
  856. "igu_sb_id (0x%x) igu_seg_id(0x%x) "
  857. "time_to_expire (0x%x) "
  858. "timer_value(0x%x)\n", j,
  859. hc_sm_p[j].__flags,
  860. hc_sm_p[j].igu_sb_id,
  861. hc_sm_p[j].igu_seg_id,
  862. hc_sm_p[j].time_to_expire,
  863. hc_sm_p[j].timer_value);
  864. }
865. /* Indices data */
  866. for (j = 0; j < loop; j++) {
  867. pr_cont("INDEX[%d] flags (0x%x) "
  868. "timeout (0x%x)\n", j,
  869. hc_index_p[j].flags,
  870. hc_index_p[j].timeout);
  871. }
  872. }
  873. #ifdef BNX2X_STOP_ON_ERROR
  874. /* Rings */
  875. /* Rx */
  876. for_each_rx_queue(bp, i) {
  877. struct bnx2x_fastpath *fp = &bp->fp[i];
  878. start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
  879. end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
  880. for (j = start; j != end; j = RX_BD(j + 1)) {
  881. u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
  882. struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
  883. BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
  884. i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
  885. }
  886. start = RX_SGE(fp->rx_sge_prod);
  887. end = RX_SGE(fp->last_max_sge);
  888. for (j = start; j != end; j = RX_SGE(j + 1)) {
  889. u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
  890. struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
  891. BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
  892. i, j, rx_sge[1], rx_sge[0], sw_page->page);
  893. }
  894. start = RCQ_BD(fp->rx_comp_cons - 10);
  895. end = RCQ_BD(fp->rx_comp_cons + 503);
  896. for (j = start; j != end; j = RCQ_BD(j + 1)) {
  897. u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
  898. BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
  899. i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
  900. }
  901. }
  902. /* Tx */
  903. for_each_tx_queue(bp, i) {
  904. struct bnx2x_fastpath *fp = &bp->fp[i];
  905. start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
  906. end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
  907. for (j = start; j != end; j = TX_BD(j + 1)) {
  908. struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
  909. BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
  910. i, j, sw_bd->skb, sw_bd->first_bd);
  911. }
  912. start = TX_BD(fp->tx_bd_cons - 10);
  913. end = TX_BD(fp->tx_bd_cons + 254);
  914. for (j = start; j != end; j = TX_BD(j + 1)) {
  915. u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
  916. BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
  917. i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
  918. }
  919. }
  920. #endif
  921. bnx2x_fw_dump(bp);
  922. bnx2x_mc_assert(bp);
  923. BNX2X_ERR("end crash dump -----------------\n");
  924. }
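/*
 * Enable host-coalescing (HC) interrupts for the interrupt mode in use
 * (MSI-X, MSI or INTx) and program the leading/trailing edge registers.
 */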
  925. static void bnx2x_hc_int_enable(struct bnx2x *bp)
  926. {
  927. int port = BP_PORT(bp);
  928. u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  929. u32 val = REG_RD(bp, addr);
  930. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  931. int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
  932. if (msix) {
  933. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  934. HC_CONFIG_0_REG_INT_LINE_EN_0);
  935. val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  936. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  937. } else if (msi) {
  938. val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
  939. val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  940. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  941. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  942. } else {
  943. val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  944. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  945. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  946. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  947. if (!CHIP_IS_E1(bp)) {
  948. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
  949. val, port, addr);
  950. REG_WR(bp, addr, val);
  951. val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
  952. }
  953. }
  954. if (CHIP_IS_E1(bp))
  955. REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
  956. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
  957. val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
  958. REG_WR(bp, addr, val);
  959. /*
  960. * Ensure that HC_CONFIG is written before leading/trailing edge config
  961. */
  962. mmiowb();
  963. barrier();
  964. if (!CHIP_IS_E1(bp)) {
  965. /* init leading/trailing edge */
  966. if (IS_MF(bp)) {
  967. val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
  968. if (bp->port.pmf)
  969. /* enable nig and gpio3 attention */
  970. val |= 0x1100;
  971. } else
  972. val = 0xffff;
  973. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
  974. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
  975. }
  976. /* Make sure that interrupts are indeed enabled from here on */
  977. mmiowb();
  978. }
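/*
 * IGU counterpart of bnx2x_hc_int_enable() for chips whose interrupt
 * controller is the IGU block rather than the HC.
 */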
  979. static void bnx2x_igu_int_enable(struct bnx2x *bp)
  980. {
  981. u32 val;
  982. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  983. int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
  984. val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
  985. if (msix) {
  986. val &= ~(IGU_PF_CONF_INT_LINE_EN |
  987. IGU_PF_CONF_SINGLE_ISR_EN);
  988. val |= (IGU_PF_CONF_FUNC_EN |
  989. IGU_PF_CONF_MSI_MSIX_EN |
  990. IGU_PF_CONF_ATTN_BIT_EN);
  991. } else if (msi) {
  992. val &= ~IGU_PF_CONF_INT_LINE_EN;
  993. val |= (IGU_PF_CONF_FUNC_EN |
  994. IGU_PF_CONF_MSI_MSIX_EN |
  995. IGU_PF_CONF_ATTN_BIT_EN |
  996. IGU_PF_CONF_SINGLE_ISR_EN);
  997. } else {
  998. val &= ~IGU_PF_CONF_MSI_MSIX_EN;
  999. val |= (IGU_PF_CONF_FUNC_EN |
  1000. IGU_PF_CONF_INT_LINE_EN |
  1001. IGU_PF_CONF_ATTN_BIT_EN |
  1002. IGU_PF_CONF_SINGLE_ISR_EN);
  1003. }
  1004. DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
  1005. val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
  1006. REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
  1007. barrier();
  1008. /* init leading/trailing edge */
  1009. if (IS_MF(bp)) {
  1010. val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
  1011. if (bp->port.pmf)
  1012. /* enable nig and gpio3 attention */
  1013. val |= 0x1100;
  1014. } else
  1015. val = 0xffff;
  1016. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
  1017. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
  1018. /* Make sure that interrupts are indeed enabled from here on */
  1019. mmiowb();
  1020. }
  1021. void bnx2x_int_enable(struct bnx2x *bp)
  1022. {
  1023. if (bp->common.int_block == INT_BLOCK_HC)
  1024. bnx2x_hc_int_enable(bp);
  1025. else
  1026. bnx2x_igu_int_enable(bp);
  1027. }
  1028. static void bnx2x_hc_int_disable(struct bnx2x *bp)
  1029. {
  1030. int port = BP_PORT(bp);
  1031. u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  1032. u32 val = REG_RD(bp, addr);
  1033. /*
1034. * In E1 we must use only the PCI configuration space to disable the
1035. * MSI/MSI-X capability.
1036. * It is forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
  1037. */
  1038. if (CHIP_IS_E1(bp)) {
1039. /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
1040. * use the mask register to prevent the HC from sending interrupts
1041. * after we exit this function
  1042. */
  1043. REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
  1044. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  1045. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  1046. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  1047. } else
  1048. val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
  1049. HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
  1050. HC_CONFIG_0_REG_INT_LINE_EN_0 |
  1051. HC_CONFIG_0_REG_ATTN_BIT_EN_0);
  1052. DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
  1053. val, port, addr);
  1054. /* flush all outstanding writes */
  1055. mmiowb();
  1056. REG_WR(bp, addr, val);
  1057. if (REG_RD(bp, addr) != val)
  1058. BNX2X_ERR("BUG! proper val not read from IGU!\n");
  1059. }
  1060. static void bnx2x_igu_int_disable(struct bnx2x *bp)
  1061. {
  1062. u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
  1063. val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
  1064. IGU_PF_CONF_INT_LINE_EN |
  1065. IGU_PF_CONF_ATTN_BIT_EN);
  1066. DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
  1067. /* flush all outstanding writes */
  1068. mmiowb();
  1069. REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
  1070. if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
  1071. BNX2X_ERR("BUG! proper val not read from IGU!\n");
  1072. }
  1073. static void bnx2x_int_disable(struct bnx2x *bp)
  1074. {
  1075. if (bp->common.int_block == INT_BLOCK_HC)
  1076. bnx2x_hc_int_disable(bp);
  1077. else
  1078. bnx2x_igu_int_disable(bp);
  1079. }
  1080. void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
  1081. {
  1082. int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
  1083. int i, offset;
  1084. /* disable interrupt handling */
  1085. atomic_inc(&bp->intr_sem);
  1086. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  1087. if (disable_hw)
  1088. /* prevent the HW from sending interrupts */
  1089. bnx2x_int_disable(bp);
  1090. /* make sure all ISRs are done */
  1091. if (msix) {
  1092. synchronize_irq(bp->msix_table[0].vector);
  1093. offset = 1;
  1094. #ifdef BCM_CNIC
  1095. offset++;
  1096. #endif
  1097. for_each_eth_queue(bp, i)
  1098. synchronize_irq(bp->msix_table[i + offset].vector);
  1099. } else
  1100. synchronize_irq(bp->pdev->irq);
  1101. /* make sure sp_task is not running */
  1102. cancel_delayed_work(&bp->sp_task);
  1103. flush_workqueue(bnx2x_wq);
  1104. }
  1105. /* fast path */
  1106. /*
  1107. * General service functions
  1108. */
  1109. /* Return true if succeeded to acquire the lock */
  1110. static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
  1111. {
  1112. u32 lock_status;
  1113. u32 resource_bit = (1 << resource);
  1114. int func = BP_FUNC(bp);
  1115. u32 hw_lock_control_reg;
  1116. DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
  1117. /* Validating that the resource is within range */
  1118. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1119. DP(NETIF_MSG_HW,
  1120. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1121. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1122. return false;
  1123. }
  1124. if (func <= 5)
  1125. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1126. else
  1127. hw_lock_control_reg =
  1128. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1129. /* Try to acquire the lock */
  1130. REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
  1131. lock_status = REG_RD(bp, hw_lock_control_reg);
  1132. if (lock_status & resource_bit)
  1133. return true;
  1134. DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
  1135. return false;
  1136. }
  1137. #ifdef BCM_CNIC
  1138. static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
  1139. #endif
  1140. void bnx2x_sp_event(struct bnx2x_fastpath *fp,
  1141. union eth_rx_cqe *rr_cqe)
  1142. {
  1143. struct bnx2x *bp = fp->bp;
  1144. int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
  1145. int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
  1146. DP(BNX2X_MSG_SP,
  1147. "fp %d cid %d got ramrod #%d state is %x type is %d\n",
  1148. fp->index, cid, command, bp->state,
  1149. rr_cqe->ramrod_cqe.ramrod_type);
  1150. switch (command | fp->state) {
  1151. case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
  1152. DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
  1153. fp->state = BNX2X_FP_STATE_OPEN;
  1154. break;
  1155. case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
  1156. DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
  1157. fp->state = BNX2X_FP_STATE_HALTED;
  1158. break;
  1159. case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1160. DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
  1161. fp->state = BNX2X_FP_STATE_TERMINATED;
  1162. break;
  1163. default:
  1164. BNX2X_ERR("unexpected MC reply (%d) "
  1165. "fp[%d] state is %x\n",
  1166. command, fp->index, fp->state);
  1167. break;
  1168. }
  1169. smp_mb__before_atomic_inc();
  1170. atomic_inc(&bp->cq_spq_left);
1171. /* push the change in fp->state out to memory */
  1172. smp_wmb();
  1173. return;
  1174. }
  1175. irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
  1176. {
  1177. struct bnx2x *bp = netdev_priv(dev_instance);
  1178. u16 status = bnx2x_ack_int(bp);
  1179. u16 mask;
  1180. int i;
  1181. /* Return here if interrupt is shared and it's not for us */
  1182. if (unlikely(status == 0)) {
  1183. DP(NETIF_MSG_INTR, "not our interrupt!\n");
  1184. return IRQ_NONE;
  1185. }
  1186. DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
  1187. /* Return here if interrupt is disabled */
  1188. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  1189. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  1190. return IRQ_HANDLED;
  1191. }
  1192. #ifdef BNX2X_STOP_ON_ERROR
  1193. if (unlikely(bp->panic))
  1194. return IRQ_HANDLED;
  1195. #endif
  1196. for_each_eth_queue(bp, i) {
  1197. struct bnx2x_fastpath *fp = &bp->fp[i];
  1198. mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
  1199. if (status & mask) {
  1200. /* Handle Rx and Tx according to SB id */
  1201. prefetch(fp->rx_cons_sb);
  1202. prefetch(fp->tx_cons_sb);
  1203. prefetch(&fp->sb_running_index[SM_RX_ID]);
  1204. napi_schedule(&bnx2x_fp(bp, fp->index, napi));
  1205. status &= ~mask;
  1206. }
  1207. }
  1208. #ifdef BCM_CNIC
  1209. mask = 0x2;
  1210. if (status & (mask | 0x1)) {
  1211. struct cnic_ops *c_ops = NULL;
  1212. rcu_read_lock();
  1213. c_ops = rcu_dereference(bp->cnic_ops);
  1214. if (c_ops)
  1215. c_ops->cnic_handler(bp->cnic_data, NULL);
  1216. rcu_read_unlock();
  1217. status &= ~mask;
  1218. }
  1219. #endif
  1220. if (unlikely(status & 0x1)) {
  1221. queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
  1222. status &= ~0x1;
  1223. if (!status)
  1224. return IRQ_HANDLED;
  1225. }
  1226. if (unlikely(status))
  1227. DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
  1228. status);
  1229. return IRQ_HANDLED;
  1230. }
  1231. /* end of fast path */
  1232. /* Link */
  1233. /*
  1234. * General service functions
  1235. */
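/*
 * Acquire a hardware resource lock through the per-function DRIVER_CONTROL
 * register, polling for roughly 5 seconds (1000 attempts, 5ms apart) before
 * returning -EAGAIN.
 */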
  1236. int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
  1237. {
  1238. u32 lock_status;
  1239. u32 resource_bit = (1 << resource);
  1240. int func = BP_FUNC(bp);
  1241. u32 hw_lock_control_reg;
  1242. int cnt;
  1243. /* Validating that the resource is within range */
  1244. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1245. DP(NETIF_MSG_HW,
  1246. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1247. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1248. return -EINVAL;
  1249. }
  1250. if (func <= 5) {
  1251. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1252. } else {
  1253. hw_lock_control_reg =
  1254. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1255. }
  1256. /* Validating that the resource is not already taken */
  1257. lock_status = REG_RD(bp, hw_lock_control_reg);
  1258. if (lock_status & resource_bit) {
  1259. DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
  1260. lock_status, resource_bit);
  1261. return -EEXIST;
  1262. }
1263. /* Try for 5 seconds, polling every 5ms */
  1264. for (cnt = 0; cnt < 1000; cnt++) {
  1265. /* Try to acquire the lock */
  1266. REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
  1267. lock_status = REG_RD(bp, hw_lock_control_reg);
  1268. if (lock_status & resource_bit)
  1269. return 0;
  1270. msleep(5);
  1271. }
  1272. DP(NETIF_MSG_HW, "Timeout\n");
  1273. return -EAGAIN;
  1274. }
  1275. int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
  1276. {
  1277. u32 lock_status;
  1278. u32 resource_bit = (1 << resource);
  1279. int func = BP_FUNC(bp);
  1280. u32 hw_lock_control_reg;
  1281. DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
  1282. /* Validating that the resource is within range */
  1283. if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
  1284. DP(NETIF_MSG_HW,
  1285. "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
  1286. resource, HW_LOCK_MAX_RESOURCE_VALUE);
  1287. return -EINVAL;
  1288. }
  1289. if (func <= 5) {
  1290. hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
  1291. } else {
  1292. hw_lock_control_reg =
  1293. (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
  1294. }
  1295. /* Validating that the resource is currently taken */
  1296. lock_status = REG_RD(bp, hw_lock_control_reg);
  1297. if (!(lock_status & resource_bit)) {
  1298. DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
  1299. lock_status, resource_bit);
  1300. return -EFAULT;
  1301. }
  1302. REG_WR(bp, hw_lock_control_reg, resource_bit);
  1303. return 0;
  1304. }
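/*
 * Read the current value of a GPIO pin, honouring the port-swap strap;
 * returns 0 or 1, or -EINVAL for an out-of-range pin.
 */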
  1305. int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
  1306. {
  1307. /* The GPIO should be swapped if swap register is set and active */
  1308. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1309. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1310. int gpio_shift = gpio_num +
  1311. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1312. u32 gpio_mask = (1 << gpio_shift);
  1313. u32 gpio_reg;
  1314. int value;
  1315. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1316. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1317. return -EINVAL;
  1318. }
  1319. /* read GPIO value */
  1320. gpio_reg = REG_RD(bp, MISC_REG_GPIO);
  1321. /* get the requested pin value */
  1322. if ((gpio_reg & gpio_mask) == gpio_mask)
  1323. value = 1;
  1324. else
  1325. value = 0;
  1326. DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
  1327. return value;
  1328. }
  1329. int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
  1330. {
  1331. /* The GPIO should be swapped if swap register is set and active */
  1332. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1333. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1334. int gpio_shift = gpio_num +
  1335. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1336. u32 gpio_mask = (1 << gpio_shift);
  1337. u32 gpio_reg;
  1338. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1339. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1340. return -EINVAL;
  1341. }
  1342. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1343. /* read GPIO and mask except the float bits */
  1344. gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
  1345. switch (mode) {
  1346. case MISC_REGISTERS_GPIO_OUTPUT_LOW:
  1347. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
  1348. gpio_num, gpio_shift);
  1349. /* clear FLOAT and set CLR */
  1350. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1351. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
  1352. break;
  1353. case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
  1354. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
  1355. gpio_num, gpio_shift);
  1356. /* clear FLOAT and set SET */
  1357. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1358. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
  1359. break;
  1360. case MISC_REGISTERS_GPIO_INPUT_HI_Z:
  1361. DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
  1362. gpio_num, gpio_shift);
  1363. /* set FLOAT */
  1364. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
  1365. break;
  1366. default:
  1367. break;
  1368. }
  1369. REG_WR(bp, MISC_REG_GPIO, gpio_reg);
  1370. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1371. return 0;
  1372. }
  1373. int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
  1374. {
  1375. /* The GPIO should be swapped if swap register is set and active */
  1376. int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
  1377. REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
  1378. int gpio_shift = gpio_num +
  1379. (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
  1380. u32 gpio_mask = (1 << gpio_shift);
  1381. u32 gpio_reg;
  1382. if (gpio_num > MISC_REGISTERS_GPIO_3) {
  1383. BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
  1384. return -EINVAL;
  1385. }
  1386. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1387. /* read GPIO int */
  1388. gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
  1389. switch (mode) {
  1390. case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
  1391. DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
  1392. "output low\n", gpio_num, gpio_shift);
  1393. /* clear SET and set CLR */
  1394. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
  1395. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
  1396. break;
  1397. case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
  1398. DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
  1399. "output high\n", gpio_num, gpio_shift);
  1400. /* clear CLR and set SET */
  1401. gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
  1402. gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
  1403. break;
  1404. default:
  1405. break;
  1406. }
  1407. REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
  1408. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
  1409. return 0;
  1410. }
  1411. static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
  1412. {
  1413. u32 spio_mask = (1 << spio_num);
  1414. u32 spio_reg;
  1415. if ((spio_num < MISC_REGISTERS_SPIO_4) ||
  1416. (spio_num > MISC_REGISTERS_SPIO_7)) {
  1417. BNX2X_ERR("Invalid SPIO %d\n", spio_num);
  1418. return -EINVAL;
  1419. }
  1420. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
  1421. /* read SPIO and mask except the float bits */
  1422. spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
  1423. switch (mode) {
  1424. case MISC_REGISTERS_SPIO_OUTPUT_LOW:
  1425. DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
  1426. /* clear FLOAT and set CLR */
  1427. spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1428. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
  1429. break;
  1430. case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
  1431. DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
  1432. /* clear FLOAT and set SET */
  1433. spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1434. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
  1435. break;
  1436. case MISC_REGISTERS_SPIO_INPUT_HI_Z:
  1437. DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
  1438. /* set FLOAT */
  1439. spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
  1440. break;
  1441. default:
  1442. break;
  1443. }
  1444. REG_WR(bp, MISC_REG_SPIO, spio_reg);
  1445. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
  1446. return 0;
  1447. }
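/*
 * Pick the external PHY whose configuration entry applies: the PHY behind
 * the current link when it is up, otherwise the one selected by the
 * PHY-selection configuration, corrected for PHY swapping.
 */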
  1448. int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
  1449. {
  1450. u32 sel_phy_idx = 0;
  1451. if (bp->link_vars.link_up) {
  1452. sel_phy_idx = EXT_PHY1;
  1453. /* In case link is SERDES, check if the EXT_PHY2 is the one */
  1454. if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
  1455. (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
  1456. sel_phy_idx = EXT_PHY2;
  1457. } else {
  1458. switch (bnx2x_phy_selection(&bp->link_params)) {
  1459. case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
  1460. case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
  1461. case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
  1462. sel_phy_idx = EXT_PHY1;
  1463. break;
  1464. case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
  1465. case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
  1466. sel_phy_idx = EXT_PHY2;
  1467. break;
  1468. }
  1469. }
  1470. /*
1471. * The selected active PHY is always after swapping (in case PHY
  1472. * swapping is enabled). So when swapping is enabled, we need to reverse
  1473. * the configuration
  1474. */
  1475. if (bp->link_params.multi_phy_config &
  1476. PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
  1477. if (sel_phy_idx == EXT_PHY1)
  1478. sel_phy_idx = EXT_PHY2;
  1479. else if (sel_phy_idx == EXT_PHY2)
  1480. sel_phy_idx = EXT_PHY1;
  1481. }
  1482. return LINK_CONFIG_IDX(sel_phy_idx);
  1483. }
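/*
 * Translate the negotiated IEEE pause bits into ADVERTISED_Pause /
 * ADVERTISED_Asym_Pause flags for the active link configuration index.
 */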
  1484. void bnx2x_calc_fc_adv(struct bnx2x *bp)
  1485. {
  1486. u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
  1487. switch (bp->link_vars.ieee_fc &
  1488. MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
  1489. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
  1490. bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
  1491. ADVERTISED_Pause);
  1492. break;
  1493. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
  1494. bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
  1495. ADVERTISED_Pause);
  1496. break;
  1497. case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
  1498. bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
  1499. break;
  1500. default:
  1501. bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
  1502. ADVERTISED_Pause);
  1503. break;
  1504. }
  1505. }
  1506. u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
  1507. {
  1508. if (!BP_NOMCP(bp)) {
  1509. u8 rc;
  1510. int cfx_idx = bnx2x_get_link_cfg_idx(bp);
  1511. u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
  1512. /* Initialize link parameters structure variables */
  1513. /* It is recommended to turn off RX FC for jumbo frames
  1514. for better performance */
  1515. if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
  1516. bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
  1517. else
  1518. bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
  1519. bnx2x_acquire_phy_lock(bp);
  1520. if (load_mode == LOAD_DIAG) {
  1521. bp->link_params.loopback_mode = LOOPBACK_XGXS;
  1522. bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
  1523. }
  1524. rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  1525. bnx2x_release_phy_lock(bp);
  1526. bnx2x_calc_fc_adv(bp);
  1527. if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
  1528. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  1529. bnx2x_link_report(bp);
  1530. }
  1531. bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
  1532. return rc;
  1533. }
  1534. BNX2X_ERR("Bootcode is missing - can not initialize link\n");
  1535. return -EINVAL;
  1536. }
  1537. void bnx2x_link_set(struct bnx2x *bp)
  1538. {
  1539. if (!BP_NOMCP(bp)) {
  1540. bnx2x_acquire_phy_lock(bp);
  1541. bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
  1542. bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  1543. bnx2x_release_phy_lock(bp);
  1544. bnx2x_calc_fc_adv(bp);
  1545. } else
  1546. BNX2X_ERR("Bootcode is missing - can not set link\n");
  1547. }
  1548. static void bnx2x__link_reset(struct bnx2x *bp)
  1549. {
  1550. if (!BP_NOMCP(bp)) {
  1551. bnx2x_acquire_phy_lock(bp);
  1552. bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
  1553. bnx2x_release_phy_lock(bp);
  1554. } else
  1555. BNX2X_ERR("Bootcode is missing - can not reset link\n");
  1556. }
  1557. u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
  1558. {
  1559. u8 rc = 0;
  1560. if (!BP_NOMCP(bp)) {
  1561. bnx2x_acquire_phy_lock(bp);
  1562. rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
  1563. is_serdes);
  1564. bnx2x_release_phy_lock(bp);
  1565. } else
  1566. BNX2X_ERR("Bootcode is missing - can not test link\n");
  1567. return rc;
  1568. }
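/*
 * Derive the per-port rate-shaping and fairness timer parameters from the
 * current line speed (r_param is presumably bytes per usec at line rate).
 */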
  1569. static void bnx2x_init_port_minmax(struct bnx2x *bp)
  1570. {
  1571. u32 r_param = bp->link_vars.line_speed / 8;
  1572. u32 fair_periodic_timeout_usec;
  1573. u32 t_fair;
  1574. memset(&(bp->cmng.rs_vars), 0,
  1575. sizeof(struct rate_shaping_vars_per_port));
  1576. memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
  1577. /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
  1578. bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
1579. /* this is the threshold below which no timer arming will occur;
1580. the 1.25 coefficient makes the threshold a little bigger
1581. than the real time, to compensate for timer inaccuracy */
  1582. bp->cmng.rs_vars.rs_threshold =
  1583. (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
  1584. /* resolution of fairness timer */
  1585. fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
  1586. /* for 10G it is 1000usec. for 1G it is 10000usec. */
  1587. t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
  1588. /* this is the threshold below which we won't arm the timer anymore */
  1589. bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
  1590. /* we multiply by 1e3/8 to get bytes/msec.
  1591. We don't want the credits to pass a credit
  1592. of the t_fair*FAIR_MEM (algorithm resolution) */
  1593. bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
  1594. /* since each tick is 4 usec */
  1595. bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
  1596. }
  1597. /* Calculates the sum of vn_min_rates.
  1598. It's needed for further normalizing of the min_rates.
  1599. Returns:
  1600. sum of vn_min_rates.
  1601. or
  1602. 0 - if all the min_rates are 0.
1603. In the latter case the fairness algorithm should be deactivated.
  1604. If not all min_rates are zero then those that are zeroes will be set to 1.
  1605. */
  1606. static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
  1607. {
  1608. int all_zero = 1;
  1609. int vn;
  1610. bp->vn_weight_sum = 0;
  1611. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  1612. u32 vn_cfg = bp->mf_config[vn];
  1613. u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
  1614. FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
  1615. /* Skip hidden vns */
  1616. if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
  1617. continue;
  1618. /* If min rate is zero - set it to 1 */
  1619. if (!vn_min_rate)
  1620. vn_min_rate = DEF_MIN_RATE;
  1621. else
  1622. all_zero = 0;
  1623. bp->vn_weight_sum += vn_min_rate;
  1624. }
  1625. /* ... only if all min rates are zeros - disable fairness */
  1626. if (all_zero) {
  1627. bp->cmng.flags.cmng_enables &=
  1628. ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
  1629. DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
  1630. " fairness will be disabled\n");
  1631. } else
  1632. bp->cmng.flags.cmng_enables |=
  1633. CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
  1634. }
  1635. static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
  1636. {
  1637. struct rate_shaping_vars_per_vn m_rs_vn;
  1638. struct fairness_vars_per_vn m_fair_vn;
  1639. u32 vn_cfg = bp->mf_config[vn];
  1640. int func = 2*vn + BP_PORT(bp);
  1641. u16 vn_min_rate, vn_max_rate;
  1642. int i;
  1643. /* If function is hidden - set min and max to zeroes */
  1644. if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
  1645. vn_min_rate = 0;
  1646. vn_max_rate = 0;
  1647. } else {
  1648. u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
  1649. vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
  1650. FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
  1651. /* If fairness is enabled (not all min rates are zeroes) and
  1652. if current min rate is zero - set it to 1.
  1653. This is a requirement of the algorithm. */
  1654. if (bp->vn_weight_sum && (vn_min_rate == 0))
  1655. vn_min_rate = DEF_MIN_RATE;
  1656. if (IS_MF_SI(bp))
1657. /* maxCfg is in percent of link speed */
  1658. vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
  1659. else
  1660. /* maxCfg is absolute in 100Mb units */
  1661. vn_max_rate = maxCfg * 100;
  1662. }
  1663. DP(NETIF_MSG_IFUP,
  1664. "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
  1665. func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
  1666. memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
  1667. memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
  1668. /* global vn counter - maximal Mbps for this vn */
  1669. m_rs_vn.vn_counter.rate = vn_max_rate;
  1670. /* quota - number of bytes transmitted in this period */
  1671. m_rs_vn.vn_counter.quota =
  1672. (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
  1673. if (bp->vn_weight_sum) {
  1674. /* credit for each period of the fairness algorithm:
  1675. number of bytes in T_FAIR (the vn share the port rate).
  1676. vn_weight_sum should not be larger than 10000, thus
  1677. T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
  1678. than zero */
  1679. m_fair_vn.vn_credit_delta =
  1680. max_t(u32, (vn_min_rate * (T_FAIR_COEF /
  1681. (8 * bp->vn_weight_sum))),
  1682. (bp->cmng.fair_vars.fair_threshold +
  1683. MIN_ABOVE_THRESH));
  1684. DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
  1685. m_fair_vn.vn_credit_delta);
  1686. }
  1687. /* Store it to internal memory */
  1688. for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
  1689. REG_WR(bp, BAR_XSTRORM_INTMEM +
  1690. XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
  1691. ((u32 *)(&m_rs_vn))[i]);
  1692. for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
  1693. REG_WR(bp, BAR_XSTRORM_INTMEM +
  1694. XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
  1695. ((u32 *)(&m_fair_vn))[i]);
  1696. }
  1697. static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
  1698. {
  1699. if (CHIP_REV_IS_SLOW(bp))
  1700. return CMNG_FNS_NONE;
  1701. if (IS_MF(bp))
  1702. return CMNG_FNS_MINMAX;
  1703. return CMNG_FNS_NONE;
  1704. }
  1705. void bnx2x_read_mf_cfg(struct bnx2x *bp)
  1706. {
  1707. int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
  1708. if (BP_NOMCP(bp))
1709. return; /* what should be the default value in this case */
  1710. /* For 2 port configuration the absolute function number formula
  1711. * is:
  1712. * abs_func = 2 * vn + BP_PORT + BP_PATH
  1713. *
  1714. * and there are 4 functions per port
  1715. *
  1716. * For 4 port configuration it is
  1717. * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
  1718. *
  1719. * and there are 2 functions per port
  1720. */
  1721. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  1722. int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
  1723. if (func >= E1H_FUNC_MAX)
  1724. break;
  1725. bp->mf_config[vn] =
  1726. MF_CFG_RD(bp, func_mf_config[func].config);
  1727. }
  1728. }
  1729. static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
  1730. {
  1731. if (cmng_type == CMNG_FNS_MINMAX) {
  1732. int vn;
  1733. /* clear cmng_enables */
  1734. bp->cmng.flags.cmng_enables = 0;
  1735. /* read mf conf from shmem */
  1736. if (read_cfg)
  1737. bnx2x_read_mf_cfg(bp);
  1738. /* Init rate shaping and fairness contexts */
  1739. bnx2x_init_port_minmax(bp);
  1740. /* vn_weight_sum and enable fairness if not 0 */
  1741. bnx2x_calc_vn_weight_sum(bp);
  1742. /* calculate and set min-max rate for each vn */
  1743. if (bp->port.pmf)
  1744. for (vn = VN_0; vn < E1HVN_MAX; vn++)
  1745. bnx2x_init_vn_minmax(bp, vn);
  1746. /* always enable rate shaping and fairness */
  1747. bp->cmng.flags.cmng_enables |=
  1748. CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
  1749. if (!bp->vn_weight_sum)
  1750. DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
  1751. " fairness will be disabled\n");
  1752. return;
  1753. }
  1754. /* rate shaping and fairness are disabled */
  1755. DP(NETIF_MSG_IFUP,
  1756. "rate shaping and fairness are disabled\n");
  1757. }
  1758. static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
  1759. {
  1760. int port = BP_PORT(bp);
  1761. int func;
  1762. int vn;
  1763. /* Set the attention towards other drivers on the same port */
  1764. for (vn = VN_0; vn < E1HVN_MAX; vn++) {
  1765. if (vn == BP_E1HVN(bp))
  1766. continue;
  1767. func = ((vn << 1) | port);
  1768. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
  1769. (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
  1770. }
  1771. }
  1772. /* This function is called upon link interrupt */
  1773. static void bnx2x_link_attn(struct bnx2x *bp)
  1774. {
  1775. /* Make sure that we are synced with the current statistics */
  1776. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  1777. bnx2x_link_update(&bp->link_params, &bp->link_vars);
  1778. if (bp->link_vars.link_up) {
  1779. /* dropless flow control */
  1780. if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
  1781. int port = BP_PORT(bp);
  1782. u32 pause_enabled = 0;
  1783. if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
  1784. pause_enabled = 1;
  1785. REG_WR(bp, BAR_USTRORM_INTMEM +
  1786. USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
  1787. pause_enabled);
  1788. }
  1789. if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
  1790. struct host_port_stats *pstats;
  1791. pstats = bnx2x_sp(bp, port_stats);
  1792. /* reset old bmac stats */
  1793. memset(&(pstats->mac_stx[0]), 0,
  1794. sizeof(struct mac_stx));
  1795. }
  1796. if (bp->state == BNX2X_STATE_OPEN)
  1797. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  1798. }
  1799. if (bp->link_vars.link_up && bp->link_vars.line_speed) {
  1800. int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
  1801. if (cmng_fns != CMNG_FNS_NONE) {
  1802. bnx2x_cmng_fns_init(bp, false, cmng_fns);
  1803. storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
  1804. } else
  1805. /* rate shaping and fairness are disabled */
  1806. DP(NETIF_MSG_IFUP,
  1807. "single function mode without fairness\n");
  1808. }
  1809. __bnx2x_link_report(bp);
  1810. if (IS_MF(bp))
  1811. bnx2x_link_sync_notify(bp);
  1812. }
  1813. void bnx2x__link_status_update(struct bnx2x *bp)
  1814. {
  1815. if (bp->state != BNX2X_STATE_OPEN)
  1816. return;
  1817. bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
  1818. if (bp->link_vars.link_up)
  1819. bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
  1820. else
  1821. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  1822. /* indicate link status */
  1823. bnx2x_link_report(bp);
  1824. }
  1825. static void bnx2x_pmf_update(struct bnx2x *bp)
  1826. {
  1827. int port = BP_PORT(bp);
  1828. u32 val;
  1829. bp->port.pmf = 1;
  1830. DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
  1831. /* enable nig attention */
  1832. val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
  1833. if (bp->common.int_block == INT_BLOCK_HC) {
  1834. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
  1835. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
  1836. } else if (CHIP_IS_E2(bp)) {
  1837. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
  1838. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
  1839. }
  1840. bnx2x_stats_handle(bp, STATS_EVENT_PMF);
  1841. }
  1842. /* end of Link */
  1843. /* slow path */
  1844. /*
  1845. * General service functions
  1846. */
  1847. /* send the MCP a request, block until there is a reply */
  1848. u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
  1849. {
  1850. int mb_idx = BP_FW_MB_IDX(bp);
  1851. u32 seq;
  1852. u32 rc = 0;
  1853. u32 cnt = 1;
  1854. u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
  1855. mutex_lock(&bp->fw_mb_mutex);
  1856. seq = ++bp->fw_seq;
  1857. SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
  1858. SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
  1859. DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
  1860. do {
1861. /* let the FW do its magic ... */
  1862. msleep(delay);
  1863. rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
1864. /* Give the FW up to 5 seconds (500*10ms) */
  1865. } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
  1866. DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
  1867. cnt*delay, rc, seq);
  1868. /* is this a reply to our command? */
  1869. if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
  1870. rc &= FW_MSG_CODE_MASK;
  1871. else {
  1872. /* FW BUG! */
  1873. BNX2X_ERR("FW failed to respond!\n");
  1874. bnx2x_fw_dump(bp);
  1875. rc = 0;
  1876. }
  1877. mutex_unlock(&bp->fw_mb_mutex);
  1878. return rc;
  1879. }
  1880. static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
  1881. {
  1882. #ifdef BCM_CNIC
  1883. if (IS_FCOE_FP(fp) && IS_MF(bp))
  1884. return false;
  1885. #endif
  1886. return true;
  1887. }
  1888. /* must be called under rtnl_lock */
  1889. static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
  1890. {
  1891. u32 mask = (1 << cl_id);
1892. /* initial setting is BNX2X_ACCEPT_NONE */
  1893. u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
  1894. u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
  1895. u8 unmatched_unicast = 0;
  1896. if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
  1897. unmatched_unicast = 1;
  1898. if (filters & BNX2X_PROMISCUOUS_MODE) {
1899. /* promiscuous - accept all, drop none */
  1900. drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
  1901. accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
  1902. if (IS_MF_SI(bp)) {
1903. /*
1904. * SI mode defines promiscuous mode as accepting
1905. * only unmatched unicast packets
1906. */
  1907. unmatched_unicast = 1;
  1908. accp_all_ucast = 0;
  1909. }
  1910. }
  1911. if (filters & BNX2X_ACCEPT_UNICAST) {
  1912. /* accept matched ucast */
  1913. drop_all_ucast = 0;
  1914. }
  1915. if (filters & BNX2X_ACCEPT_MULTICAST)
  1916. /* accept matched mcast */
  1917. drop_all_mcast = 0;
  1918. if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
1919. /* accept all ucast */
  1920. drop_all_ucast = 0;
  1921. accp_all_ucast = 1;
  1922. }
  1923. if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
  1924. /* accept all mcast */
  1925. drop_all_mcast = 0;
  1926. accp_all_mcast = 1;
  1927. }
  1928. if (filters & BNX2X_ACCEPT_BROADCAST) {
  1929. /* accept (all) bcast */
  1930. drop_all_bcast = 0;
  1931. accp_all_bcast = 1;
  1932. }
  1933. bp->mac_filters.ucast_drop_all = drop_all_ucast ?
  1934. bp->mac_filters.ucast_drop_all | mask :
  1935. bp->mac_filters.ucast_drop_all & ~mask;
  1936. bp->mac_filters.mcast_drop_all = drop_all_mcast ?
  1937. bp->mac_filters.mcast_drop_all | mask :
  1938. bp->mac_filters.mcast_drop_all & ~mask;
  1939. bp->mac_filters.bcast_drop_all = drop_all_bcast ?
  1940. bp->mac_filters.bcast_drop_all | mask :
  1941. bp->mac_filters.bcast_drop_all & ~mask;
  1942. bp->mac_filters.ucast_accept_all = accp_all_ucast ?
  1943. bp->mac_filters.ucast_accept_all | mask :
  1944. bp->mac_filters.ucast_accept_all & ~mask;
  1945. bp->mac_filters.mcast_accept_all = accp_all_mcast ?
  1946. bp->mac_filters.mcast_accept_all | mask :
  1947. bp->mac_filters.mcast_accept_all & ~mask;
  1948. bp->mac_filters.bcast_accept_all = accp_all_bcast ?
  1949. bp->mac_filters.bcast_accept_all | mask :
  1950. bp->mac_filters.bcast_accept_all & ~mask;
  1951. bp->mac_filters.unmatched_unicast = unmatched_unicast ?
  1952. bp->mac_filters.unmatched_unicast | mask :
  1953. bp->mac_filters.unmatched_unicast & ~mask;
  1954. }
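/* Program the per-function FW configuration: TPA enable, RSS mode and
 * capabilities, statistics collection addresses and the SPQ address/producer.
 */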
  1955. static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
  1956. {
  1957. struct tstorm_eth_function_common_config tcfg = {0};
  1958. u16 rss_flgs;
  1959. /* tpa */
  1960. if (p->func_flgs & FUNC_FLG_TPA)
  1961. tcfg.config_flags |=
  1962. TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
  1963. /* set rss flags */
  1964. rss_flgs = (p->rss->mode <<
  1965. TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
  1966. if (p->rss->cap & RSS_IPV4_CAP)
  1967. rss_flgs |= RSS_IPV4_CAP_MASK;
  1968. if (p->rss->cap & RSS_IPV4_TCP_CAP)
  1969. rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
  1970. if (p->rss->cap & RSS_IPV6_CAP)
  1971. rss_flgs |= RSS_IPV6_CAP_MASK;
  1972. if (p->rss->cap & RSS_IPV6_TCP_CAP)
  1973. rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
  1974. tcfg.config_flags |= rss_flgs;
  1975. tcfg.rss_result_mask = p->rss->result_mask;
  1976. storm_memset_func_cfg(bp, &tcfg, p->func_id);
  1977. /* Enable the function in the FW */
  1978. storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
  1979. storm_memset_func_en(bp, p->func_id, 1);
  1980. /* statistics */
  1981. if (p->func_flgs & FUNC_FLG_STATS) {
  1982. struct stats_indication_flags stats_flags = {0};
  1983. stats_flags.collect_eth = 1;
  1984. storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
  1985. storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
  1986. storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
  1987. storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
  1988. storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
  1989. storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
  1990. storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
  1991. storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
  1992. }
  1993. /* spq */
  1994. if (p->func_flgs & FUNC_FLG_SPQ) {
  1995. storm_memset_spq_addr(bp, p->spq_map, p->func_id);
  1996. REG_WR(bp, XSEM_REG_FAST_MEMORY +
  1997. XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
  1998. }
  1999. }
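/* Build the QUEUE_FLG_* set for a client queue: cache alignment, HC,
 * outer VLAN (MF-SD), VLAN removal, TPA and statistics.
 */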
  2000. static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
  2001. struct bnx2x_fastpath *fp)
  2002. {
  2003. u16 flags = 0;
  2004. /* calculate queue flags */
  2005. flags |= QUEUE_FLG_CACHE_ALIGN;
  2006. flags |= QUEUE_FLG_HC;
  2007. flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
  2008. flags |= QUEUE_FLG_VLAN;
  2009. DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
  2010. if (!fp->disable_tpa)
  2011. flags |= QUEUE_FLG_TPA;
  2012. flags = stat_counter_valid(bp, fp) ?
  2013. (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
  2014. return flags;
  2015. }
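/* Prepare the Rx queue init parameters for a fastpath client: ring, SGE and
 * RCQ mappings, buffer and TPA aggregation sizes, pause thresholds, HC rate.
 */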
  2016. static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
  2017. struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
  2018. struct bnx2x_rxq_init_params *rxq_init)
  2019. {
  2020. u16 max_sge = 0;
  2021. u16 sge_sz = 0;
  2022. u16 tpa_agg_size = 0;
  2023. /* calculate queue flags */
  2024. u16 flags = bnx2x_get_cl_flags(bp, fp);
  2025. if (!fp->disable_tpa) {
  2026. pause->sge_th_hi = 250;
  2027. pause->sge_th_lo = 150;
  2028. tpa_agg_size = min_t(u32,
  2029. (min_t(u32, 8, MAX_SKB_FRAGS) *
  2030. SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
  2031. max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
  2032. SGE_PAGE_SHIFT;
  2033. max_sge = ((max_sge + PAGES_PER_SGE - 1) &
  2034. (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
  2035. sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
  2036. 0xffff);
  2037. }
  2038. /* pause - not for e1 */
  2039. if (!CHIP_IS_E1(bp)) {
  2040. pause->bd_th_hi = 350;
  2041. pause->bd_th_lo = 250;
  2042. pause->rcq_th_hi = 350;
  2043. pause->rcq_th_lo = 250;
  2044. pause->sge_th_hi = 0;
  2045. pause->sge_th_lo = 0;
  2046. pause->pri_map = 1;
  2047. }
  2048. /* rxq setup */
  2049. rxq_init->flags = flags;
  2050. rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
  2051. rxq_init->dscr_map = fp->rx_desc_mapping;
  2052. rxq_init->sge_map = fp->rx_sge_mapping;
  2053. rxq_init->rcq_map = fp->rx_comp_mapping;
  2054. rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
  2055. /* Always use mini-jumbo MTU for FCoE L2 ring */
  2056. if (IS_FCOE_FP(fp))
  2057. rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
  2058. else
  2059. rxq_init->mtu = bp->dev->mtu;
  2060. rxq_init->buf_sz = fp->rx_buf_size;
  2061. rxq_init->cl_qzone_id = fp->cl_qzone_id;
  2062. rxq_init->cl_id = fp->cl_id;
  2063. rxq_init->spcl_id = fp->cl_id;
  2064. rxq_init->stat_id = fp->cl_id;
  2065. rxq_init->tpa_agg_sz = tpa_agg_size;
  2066. rxq_init->sge_buf_sz = sge_sz;
  2067. rxq_init->max_sges_pkt = max_sge;
  2068. rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
  2069. rxq_init->fw_sb_id = fp->fw_sb_id;
  2070. if (IS_FCOE_FP(fp))
  2071. rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
  2072. else
  2073. rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
  2074. rxq_init->cid = HW_CID(bp, fp->cid);
  2075. rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
  2076. }
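/* Prepare the Tx queue init parameters for a fastpath client; the FCoE ring
 * gets its own CQ index and traffic type.
 */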
  2077. static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
  2078. struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
  2079. {
  2080. u16 flags = bnx2x_get_cl_flags(bp, fp);
  2081. txq_init->flags = flags;
  2082. txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
  2083. txq_init->dscr_map = fp->tx_desc_mapping;
  2084. txq_init->stat_id = fp->cl_id;
  2085. txq_init->cid = HW_CID(bp, fp->cid);
  2086. txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
  2087. txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
  2088. txq_init->fw_sb_id = fp->fw_sb_id;
  2089. if (IS_FCOE_FP(fp)) {
  2090. txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
  2091. txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
  2092. }
  2093. txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
  2094. }
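/* Per-PF init: reset the IGU statistics (E2), program the function
 * configuration and congestion management defaults, set the initial Rx mode
 * and initialize the event queue data.
 */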
  2095. static void bnx2x_pf_init(struct bnx2x *bp)
  2096. {
  2097. struct bnx2x_func_init_params func_init = {0};
  2098. struct bnx2x_rss_params rss = {0};
  2099. struct event_ring_data eq_data = { {0} };
  2100. u16 flags;
  2101. /* pf specific setups */
  2102. if (!CHIP_IS_E1(bp))
  2103. storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
  2104. if (CHIP_IS_E2(bp)) {
  2105. /* reset IGU PF statistics: MSIX + ATTN */
  2106. /* PF */
  2107. REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
  2108. BNX2X_IGU_STAS_MSG_VF_CNT*4 +
  2109. (CHIP_MODE_IS_4_PORT(bp) ?
  2110. BP_FUNC(bp) : BP_VN(bp))*4, 0);
  2111. /* ATTN */
  2112. REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
  2113. BNX2X_IGU_STAS_MSG_VF_CNT*4 +
  2114. BNX2X_IGU_STAS_MSG_PF_CNT*4 +
  2115. (CHIP_MODE_IS_4_PORT(bp) ?
  2116. BP_FUNC(bp) : BP_VN(bp))*4, 0);
  2117. }
  2118. /* function setup flags */
  2119. flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
  2120. if (CHIP_IS_E1x(bp))
  2121. flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
  2122. else
  2123. flags |= FUNC_FLG_TPA;
  2124. /* function setup */
2125. /*
2126. * Although RSS is meaningless when there is a single HW queue, we
2127. * still need it enabled in order to have HW Rx hash generated.
2128. */
  2129. rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
  2130. RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
  2131. rss.mode = bp->multi_mode;
  2132. rss.result_mask = MULTI_MASK;
  2133. func_init.rss = &rss;
  2134. func_init.func_flgs = flags;
  2135. func_init.pf_id = BP_FUNC(bp);
  2136. func_init.func_id = BP_FUNC(bp);
  2137. func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
  2138. func_init.spq_map = bp->spq_mapping;
  2139. func_init.spq_prod = bp->spq_prod_idx;
  2140. bnx2x_func_init(bp, &func_init);
  2141. memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2142. /*
2143. * Congestion management values depend on the link rate.
2144. * There is no active link yet, so the initial link rate is set to 10 Gbps.
2145. * When the link comes up, the congestion management values are
2146. * re-calculated according to the actual link rate.
2147. */
  2148. bp->link_vars.line_speed = SPEED_10000;
  2149. bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
  2150. /* Only the PMF sets the HW */
  2151. if (bp->port.pmf)
  2152. storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
  2153. /* no rx until link is up */
  2154. bp->rx_mode = BNX2X_RX_MODE_NONE;
  2155. bnx2x_set_storm_rx_mode(bp);
  2156. /* init Event Queue */
  2157. eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
  2158. eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
  2159. eq_data.producer = bp->eq_prod;
  2160. eq_data.index_id = HC_SP_INDEX_EQ_CONS;
  2161. eq_data.sb_id = DEF_SB_ID;
  2162. storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
  2163. }
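/* In E1H multi-function mode a function is disabled/enabled by stopping or
 * waking its Tx queues and closing or opening its LLH port in the NIG.
 */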
  2164. static void bnx2x_e1h_disable(struct bnx2x *bp)
  2165. {
  2166. int port = BP_PORT(bp);
  2167. netif_tx_disable(bp->dev);
  2168. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
  2169. netif_carrier_off(bp->dev);
  2170. }
  2171. static void bnx2x_e1h_enable(struct bnx2x *bp)
  2172. {
  2173. int port = BP_PORT(bp);
  2174. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2175. /* Tx queues should only be re-enabled */
  2176. netif_tx_wake_all_queues(bp->dev);
2177. /*
2178. * Do not call netif_carrier_on() here; it will be called when the link
2179. * state is checked if the link is up
2180. */
  2181. }
2182. /* called due to MCP event (on pmf):
2183. * reread the new bandwidth configuration
2184. * configure the FW
2185. * notify other functions about the change
2186. */
  2187. static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
  2188. {
  2189. if (bp->link_vars.link_up) {
  2190. bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
  2191. bnx2x_link_sync_notify(bp);
  2192. }
  2193. storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
  2194. }
  2195. static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
  2196. {
  2197. bnx2x_config_mf_bw(bp);
  2198. bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
  2199. }
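/* Handle a DCC event from the MCP: disable/enable the PF and/or re-read the
 * bandwidth allocation, then report the result back to the MCP.
 */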
  2200. static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
  2201. {
  2202. DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
  2203. if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
  2204. /*
  2205. * This is the only place besides the function initialization
2206. * where the bp->flags can change, so it is done without any
  2207. * locks
  2208. */
  2209. if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
  2210. DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
  2211. bp->flags |= MF_FUNC_DIS;
  2212. bnx2x_e1h_disable(bp);
  2213. } else {
  2214. DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
  2215. bp->flags &= ~MF_FUNC_DIS;
  2216. bnx2x_e1h_enable(bp);
  2217. }
  2218. dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
  2219. }
  2220. if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
  2221. bnx2x_config_mf_bw(bp);
  2222. dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
  2223. }
  2224. /* Report results to MCP */
  2225. if (dcc_event)
  2226. bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
  2227. else
  2228. bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
  2229. }
  2230. /* must be called under the spq lock */
  2231. static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
  2232. {
  2233. struct eth_spe *next_spe = bp->spq_prod_bd;
  2234. if (bp->spq_prod_bd == bp->spq_last_bd) {
  2235. bp->spq_prod_bd = bp->spq;
  2236. bp->spq_prod_idx = 0;
  2237. DP(NETIF_MSG_TIMER, "end of spq\n");
  2238. } else {
  2239. bp->spq_prod_bd++;
  2240. bp->spq_prod_idx++;
  2241. }
  2242. return next_spe;
  2243. }
  2244. /* must be called under the spq lock */
  2245. static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
  2246. {
  2247. int func = BP_FUNC(bp);
  2248. /* Make sure that BD data is updated before writing the producer */
  2249. wmb();
  2250. REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
  2251. bp->spq_prod_idx);
  2252. mmiowb();
  2253. }
  2254. /* the slow path queue is odd since completions arrive on the fastpath ring */
  2255. int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
  2256. u32 data_hi, u32 data_lo, int common)
  2257. {
  2258. struct eth_spe *spe;
  2259. u16 type;
  2260. #ifdef BNX2X_STOP_ON_ERROR
  2261. if (unlikely(bp->panic))
  2262. return -EIO;
  2263. #endif
  2264. spin_lock_bh(&bp->spq_lock);
  2265. if (common) {
  2266. if (!atomic_read(&bp->eq_spq_left)) {
  2267. BNX2X_ERR("BUG! EQ ring full!\n");
  2268. spin_unlock_bh(&bp->spq_lock);
  2269. bnx2x_panic();
  2270. return -EBUSY;
  2271. }
  2272. } else if (!atomic_read(&bp->cq_spq_left)) {
  2273. BNX2X_ERR("BUG! SPQ ring full!\n");
  2274. spin_unlock_bh(&bp->spq_lock);
  2275. bnx2x_panic();
  2276. return -EBUSY;
  2277. }
  2278. spe = bnx2x_sp_get_next(bp);
2279. /* CID needs the port number to be encoded in it */
  2280. spe->hdr.conn_and_cmd_data =
  2281. cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
  2282. HW_CID(bp, cid));
  2283. if (common)
  2284. /* Common ramrods:
  2285. * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
  2286. * TRAFFIC_STOP, TRAFFIC_START
  2287. */
  2288. type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
  2289. & SPE_HDR_CONN_TYPE;
  2290. else
  2291. /* ETH ramrods: SETUP, HALT */
  2292. type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
  2293. & SPE_HDR_CONN_TYPE;
  2294. type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
  2295. SPE_HDR_FUNCTION_ID);
  2296. spe->hdr.type = cpu_to_le16(type);
  2297. spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
  2298. spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2299. /* stats ramrod has its own slot on the spq */
  2300. if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
2301. /* It's ok if the actual decrement is issued towards the memory
2302. * somewhere between the spin_lock and spin_unlock. Thus no
2303. * further explicit memory barrier is needed.
2304. */
  2305. if (common)
  2306. atomic_dec(&bp->eq_spq_left);
  2307. else
  2308. atomic_dec(&bp->cq_spq_left);
  2309. }
  2310. DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
  2311. "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
  2312. "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
  2313. bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
  2314. (u32)(U64_LO(bp->spq_mapping) +
  2315. (void *)bp->spq_prod_bd - (void *)bp->spq), command,
  2316. HW_CID(bp, cid), data_hi, data_lo, type,
  2317. atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
  2318. bnx2x_sp_prod_update(bp);
  2319. spin_unlock_bh(&bp->spq_lock);
  2320. return 0;
  2321. }
  2322. /* acquire split MCP access lock register */
  2323. static int bnx2x_acquire_alr(struct bnx2x *bp)
  2324. {
  2325. u32 j, val;
  2326. int rc = 0;
  2327. might_sleep();
  2328. for (j = 0; j < 1000; j++) {
  2329. val = (1UL << 31);
  2330. REG_WR(bp, GRCBASE_MCP + 0x9c, val);
  2331. val = REG_RD(bp, GRCBASE_MCP + 0x9c);
  2332. if (val & (1L << 31))
  2333. break;
  2334. msleep(5);
  2335. }
  2336. if (!(val & (1L << 31))) {
  2337. BNX2X_ERR("Cannot acquire MCP access lock register\n");
  2338. rc = -EBUSY;
  2339. }
  2340. return rc;
  2341. }
  2342. /* release split MCP access lock register */
  2343. static void bnx2x_release_alr(struct bnx2x *bp)
  2344. {
  2345. REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
  2346. }
  2347. #define BNX2X_DEF_SB_ATT_IDX 0x0001
  2348. #define BNX2X_DEF_SB_IDX 0x0002
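/* Compare the default status block indices against the driver copies and
 * return a mask of BNX2X_DEF_SB_* bits that have changed.
 */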
  2349. static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
  2350. {
  2351. struct host_sp_status_block *def_sb = bp->def_status_blk;
  2352. u16 rc = 0;
  2353. barrier(); /* status block is written to by the chip */
  2354. if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
  2355. bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
  2356. rc |= BNX2X_DEF_SB_ATT_IDX;
  2357. }
  2358. if (bp->def_idx != def_sb->sp_sb.running_index) {
  2359. bp->def_idx = def_sb->sp_sb.running_index;
  2360. rc |= BNX2X_DEF_SB_IDX;
  2361. }
2362. /* Do not reorder: the index reads should complete before handling */
  2363. barrier();
  2364. return rc;
  2365. }
  2366. /*
  2367. * slow path service functions
  2368. */
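/* Handle newly asserted attention bits: mask them in the AEU, service the
 * hard-wired sources (NIG/link, GPIOs, general attentions) and write them to
 * the HC/IGU attention-bits-set register.
 */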
  2369. static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
  2370. {
  2371. int port = BP_PORT(bp);
  2372. u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  2373. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  2374. u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
  2375. NIG_REG_MASK_INTERRUPT_PORT0;
  2376. u32 aeu_mask;
  2377. u32 nig_mask = 0;
  2378. u32 reg_addr;
  2379. if (bp->attn_state & asserted)
  2380. BNX2X_ERR("IGU ERROR\n");
  2381. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2382. aeu_mask = REG_RD(bp, aeu_addr);
  2383. DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
  2384. aeu_mask, asserted);
  2385. aeu_mask &= ~(asserted & 0x3ff);
  2386. DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
  2387. REG_WR(bp, aeu_addr, aeu_mask);
  2388. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  2389. DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
  2390. bp->attn_state |= asserted;
  2391. DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
  2392. if (asserted & ATTN_HARD_WIRED_MASK) {
  2393. if (asserted & ATTN_NIG_FOR_FUNC) {
  2394. bnx2x_acquire_phy_lock(bp);
  2395. /* save nig interrupt mask */
  2396. nig_mask = REG_RD(bp, nig_int_mask_addr);
  2397. REG_WR(bp, nig_int_mask_addr, 0);
  2398. bnx2x_link_attn(bp);
  2399. /* handle unicore attn? */
  2400. }
  2401. if (asserted & ATTN_SW_TIMER_4_FUNC)
  2402. DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
  2403. if (asserted & GPIO_2_FUNC)
  2404. DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
  2405. if (asserted & GPIO_3_FUNC)
  2406. DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
  2407. if (asserted & GPIO_4_FUNC)
  2408. DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
  2409. if (port == 0) {
  2410. if (asserted & ATTN_GENERAL_ATTN_1) {
  2411. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
  2412. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
  2413. }
  2414. if (asserted & ATTN_GENERAL_ATTN_2) {
  2415. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
  2416. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
  2417. }
  2418. if (asserted & ATTN_GENERAL_ATTN_3) {
  2419. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
  2420. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
  2421. }
  2422. } else {
  2423. if (asserted & ATTN_GENERAL_ATTN_4) {
  2424. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
  2425. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
  2426. }
  2427. if (asserted & ATTN_GENERAL_ATTN_5) {
  2428. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
  2429. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
  2430. }
  2431. if (asserted & ATTN_GENERAL_ATTN_6) {
  2432. DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
  2433. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
  2434. }
  2435. }
  2436. } /* if hardwired */
  2437. if (bp->common.int_block == INT_BLOCK_HC)
  2438. reg_addr = (HC_REG_COMMAND_REG + port*32 +
  2439. COMMAND_REG_ATTN_BITS_SET);
  2440. else
  2441. reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
  2442. DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
  2443. (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
  2444. REG_WR(bp, reg_addr, asserted);
  2445. /* now set back the mask */
  2446. if (asserted & ATTN_NIG_FOR_FUNC) {
  2447. REG_WR(bp, nig_int_mask_addr, nig_mask);
  2448. bnx2x_release_phy_lock(bp);
  2449. }
  2450. }
  2451. static inline void bnx2x_fan_failure(struct bnx2x *bp)
  2452. {
  2453. int port = BP_PORT(bp);
  2454. u32 ext_phy_config;
  2455. /* mark the failure */
  2456. ext_phy_config =
  2457. SHMEM_RD(bp,
  2458. dev_info.port_hw_config[port].external_phy_config);
  2459. ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
  2460. ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
  2461. SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
  2462. ext_phy_config);
  2463. /* log the failure */
  2464. netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
  2465. " the driver to shutdown the card to prevent permanent"
  2466. " damage. Please contact OEM Support for assistance\n");
  2467. }
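/* The bnx2x_attn_int_deasserted{0..4}() helpers below each decode one group
 * of AEU attention bits: fan failure and module detection, fatal HW block
 * errors, and MCP/driver general attentions.
 */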
  2468. static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
  2469. {
  2470. int port = BP_PORT(bp);
  2471. int reg_offset;
  2472. u32 val;
  2473. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  2474. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  2475. if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
  2476. val = REG_RD(bp, reg_offset);
  2477. val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
  2478. REG_WR(bp, reg_offset, val);
  2479. BNX2X_ERR("SPIO5 hw attention\n");
  2480. /* Fan failure attention */
  2481. bnx2x_hw_reset_phy(&bp->link_params);
  2482. bnx2x_fan_failure(bp);
  2483. }
  2484. if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
  2485. AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
  2486. bnx2x_acquire_phy_lock(bp);
  2487. bnx2x_handle_module_detect_int(&bp->link_params);
  2488. bnx2x_release_phy_lock(bp);
  2489. }
  2490. if (attn & HW_INTERRUT_ASSERT_SET_0) {
  2491. val = REG_RD(bp, reg_offset);
  2492. val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
  2493. REG_WR(bp, reg_offset, val);
  2494. BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
  2495. (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
  2496. bnx2x_panic();
  2497. }
  2498. }
  2499. static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
  2500. {
  2501. u32 val;
  2502. if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
  2503. val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
  2504. BNX2X_ERR("DB hw attention 0x%x\n", val);
  2505. /* DORQ discard attention */
  2506. if (val & 0x2)
  2507. BNX2X_ERR("FATAL error from DORQ\n");
  2508. }
  2509. if (attn & HW_INTERRUT_ASSERT_SET_1) {
  2510. int port = BP_PORT(bp);
  2511. int reg_offset;
  2512. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
  2513. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
  2514. val = REG_RD(bp, reg_offset);
  2515. val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
  2516. REG_WR(bp, reg_offset, val);
  2517. BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
  2518. (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
  2519. bnx2x_panic();
  2520. }
  2521. }
  2522. static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
  2523. {
  2524. u32 val;
  2525. if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
  2526. val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
  2527. BNX2X_ERR("CFC hw attention 0x%x\n", val);
  2528. /* CFC error attention */
  2529. if (val & 0x2)
  2530. BNX2X_ERR("FATAL error from CFC\n");
  2531. }
  2532. if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
  2533. val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
  2534. BNX2X_ERR("PXP hw attention 0x%x\n", val);
  2535. /* RQ_USDMDP_FIFO_OVERFLOW */
  2536. if (val & 0x18000)
  2537. BNX2X_ERR("FATAL error from PXP\n");
  2538. if (CHIP_IS_E2(bp)) {
  2539. val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
  2540. BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
  2541. }
  2542. }
  2543. if (attn & HW_INTERRUT_ASSERT_SET_2) {
  2544. int port = BP_PORT(bp);
  2545. int reg_offset;
  2546. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
  2547. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
  2548. val = REG_RD(bp, reg_offset);
  2549. val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
  2550. REG_WR(bp, reg_offset, val);
  2551. BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
  2552. (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
  2553. bnx2x_panic();
  2554. }
  2555. }
  2556. static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
  2557. {
  2558. u32 val;
  2559. if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
  2560. if (attn & BNX2X_PMF_LINK_ASSERT) {
  2561. int func = BP_FUNC(bp);
  2562. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  2563. bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
  2564. func_mf_config[BP_ABS_FUNC(bp)].config);
  2565. val = SHMEM_RD(bp,
  2566. func_mb[BP_FW_MB_IDX(bp)].drv_status);
  2567. if (val & DRV_STATUS_DCC_EVENT_MASK)
  2568. bnx2x_dcc_event(bp,
  2569. (val & DRV_STATUS_DCC_EVENT_MASK));
  2570. if (val & DRV_STATUS_SET_MF_BW)
  2571. bnx2x_set_mf_bw(bp);
  2572. if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
  2573. bnx2x_pmf_update(bp);
  2574. /* Always call it here: bnx2x_link_report() will
2575. * prevent duplicate link indications.
  2576. */
  2577. bnx2x__link_status_update(bp);
  2578. if (bp->port.pmf &&
  2579. (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
  2580. bp->dcbx_enabled > 0)
  2581. /* start dcbx state machine */
  2582. bnx2x_dcbx_set_params(bp,
  2583. BNX2X_DCBX_STATE_NEG_RECEIVED);
  2584. } else if (attn & BNX2X_MC_ASSERT_BITS) {
  2585. BNX2X_ERR("MC assert!\n");
  2586. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
  2587. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
  2588. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
  2589. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
  2590. bnx2x_panic();
  2591. } else if (attn & BNX2X_MCP_ASSERT) {
  2592. BNX2X_ERR("MCP assert!\n");
  2593. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
  2594. bnx2x_fw_dump(bp);
  2595. } else
  2596. BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
  2597. }
  2598. if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
  2599. BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
  2600. if (attn & BNX2X_GRC_TIMEOUT) {
  2601. val = CHIP_IS_E1(bp) ? 0 :
  2602. REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
  2603. BNX2X_ERR("GRC time-out 0x%08x\n", val);
  2604. }
  2605. if (attn & BNX2X_GRC_RSV) {
  2606. val = CHIP_IS_E1(bp) ? 0 :
  2607. REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
  2608. BNX2X_ERR("GRC reserved 0x%08x\n", val);
  2609. }
  2610. REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
  2611. }
  2612. }
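/* The MISC generic register below holds a 16-bit driver load counter and a
 * reset-in-progress flag; both are used by the parity error recovery flow.
 */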
  2613. #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
  2614. #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
  2615. #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
  2616. #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
  2617. #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
  2618. /*
  2619. * should be run under rtnl lock
  2620. */
  2621. static inline void bnx2x_set_reset_done(struct bnx2x *bp)
  2622. {
  2623. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2624. val &= ~(1 << RESET_DONE_FLAG_SHIFT);
  2625. REG_WR(bp, BNX2X_MISC_GEN_REG, val);
  2626. barrier();
  2627. mmiowb();
  2628. }
  2629. /*
  2630. * should be run under rtnl lock
  2631. */
  2632. static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
  2633. {
  2634. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2635. val |= (1 << RESET_DONE_FLAG_SHIFT);
  2636. REG_WR(bp, BNX2X_MISC_GEN_REG, val);
  2637. barrier();
  2638. mmiowb();
  2639. }
  2640. /*
  2641. * should be run under rtnl lock
  2642. */
  2643. bool bnx2x_reset_is_done(struct bnx2x *bp)
  2644. {
  2645. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2646. DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
  2647. return (val & RESET_DONE_FLAG_MASK) ? false : true;
  2648. }
  2649. /*
  2650. * should be run under rtnl lock
  2651. */
  2652. inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
  2653. {
  2654. u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2655. DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
  2656. val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
  2657. REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
  2658. barrier();
  2659. mmiowb();
  2660. }
  2661. /*
  2662. * should be run under rtnl lock
  2663. */
  2664. u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
  2665. {
  2666. u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2667. DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
  2668. val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
  2669. REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
  2670. barrier();
  2671. mmiowb();
  2672. return val1;
  2673. }
  2674. /*
  2675. * should be run under rtnl lock
  2676. */
  2677. static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
  2678. {
  2679. return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
  2680. }
  2681. static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
  2682. {
  2683. u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
  2684. REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
  2685. }
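/* Parity reporting helpers: walk each AEU parity signature and print the
 * names of the HW blocks that signalled a parity error.
 */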
  2686. static inline void _print_next_block(int idx, const char *blk)
  2687. {
  2688. if (idx)
  2689. pr_cont(", ");
  2690. pr_cont("%s", blk);
  2691. }
  2692. static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
  2693. {
  2694. int i = 0;
  2695. u32 cur_bit = 0;
  2696. for (i = 0; sig; i++) {
  2697. cur_bit = ((u32)0x1 << i);
  2698. if (sig & cur_bit) {
  2699. switch (cur_bit) {
  2700. case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
  2701. _print_next_block(par_num++, "BRB");
  2702. break;
  2703. case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
  2704. _print_next_block(par_num++, "PARSER");
  2705. break;
  2706. case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
  2707. _print_next_block(par_num++, "TSDM");
  2708. break;
  2709. case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
  2710. _print_next_block(par_num++, "SEARCHER");
  2711. break;
  2712. case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
  2713. _print_next_block(par_num++, "TSEMI");
  2714. break;
  2715. }
  2716. /* Clear the bit */
  2717. sig &= ~cur_bit;
  2718. }
  2719. }
  2720. return par_num;
  2721. }
  2722. static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
  2723. {
  2724. int i = 0;
  2725. u32 cur_bit = 0;
  2726. for (i = 0; sig; i++) {
  2727. cur_bit = ((u32)0x1 << i);
  2728. if (sig & cur_bit) {
  2729. switch (cur_bit) {
  2730. case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
  2731. _print_next_block(par_num++, "PBCLIENT");
  2732. break;
  2733. case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
  2734. _print_next_block(par_num++, "QM");
  2735. break;
  2736. case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
  2737. _print_next_block(par_num++, "XSDM");
  2738. break;
  2739. case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
  2740. _print_next_block(par_num++, "XSEMI");
  2741. break;
  2742. case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
  2743. _print_next_block(par_num++, "DOORBELLQ");
  2744. break;
  2745. case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
  2746. _print_next_block(par_num++, "VAUX PCI CORE");
  2747. break;
  2748. case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
  2749. _print_next_block(par_num++, "DEBUG");
  2750. break;
  2751. case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
  2752. _print_next_block(par_num++, "USDM");
  2753. break;
  2754. case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
  2755. _print_next_block(par_num++, "USEMI");
  2756. break;
  2757. case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
  2758. _print_next_block(par_num++, "UPB");
  2759. break;
  2760. case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
  2761. _print_next_block(par_num++, "CSDM");
  2762. break;
  2763. }
  2764. /* Clear the bit */
  2765. sig &= ~cur_bit;
  2766. }
  2767. }
  2768. return par_num;
  2769. }
  2770. static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
  2771. {
  2772. int i = 0;
  2773. u32 cur_bit = 0;
  2774. for (i = 0; sig; i++) {
  2775. cur_bit = ((u32)0x1 << i);
  2776. if (sig & cur_bit) {
  2777. switch (cur_bit) {
  2778. case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
  2779. _print_next_block(par_num++, "CSEMI");
  2780. break;
  2781. case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
  2782. _print_next_block(par_num++, "PXP");
  2783. break;
  2784. case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
  2785. _print_next_block(par_num++,
  2786. "PXPPCICLOCKCLIENT");
  2787. break;
  2788. case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
  2789. _print_next_block(par_num++, "CFC");
  2790. break;
  2791. case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
  2792. _print_next_block(par_num++, "CDU");
  2793. break;
  2794. case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
  2795. _print_next_block(par_num++, "IGU");
  2796. break;
  2797. case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
  2798. _print_next_block(par_num++, "MISC");
  2799. break;
  2800. }
  2801. /* Clear the bit */
  2802. sig &= ~cur_bit;
  2803. }
  2804. }
  2805. return par_num;
  2806. }
  2807. static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
  2808. {
  2809. int i = 0;
  2810. u32 cur_bit = 0;
  2811. for (i = 0; sig; i++) {
  2812. cur_bit = ((u32)0x1 << i);
  2813. if (sig & cur_bit) {
  2814. switch (cur_bit) {
  2815. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
  2816. _print_next_block(par_num++, "MCP ROM");
  2817. break;
  2818. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
  2819. _print_next_block(par_num++, "MCP UMP RX");
  2820. break;
  2821. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
  2822. _print_next_block(par_num++, "MCP UMP TX");
  2823. break;
  2824. case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
  2825. _print_next_block(par_num++, "MCP SCPAD");
  2826. break;
  2827. }
  2828. /* Clear the bit */
  2829. sig &= ~cur_bit;
  2830. }
  2831. }
  2832. return par_num;
  2833. }
  2834. static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
  2835. u32 sig2, u32 sig3)
  2836. {
  2837. if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
  2838. (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
  2839. int par_num = 0;
  2840. DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
  2841. "[0]:0x%08x [1]:0x%08x "
  2842. "[2]:0x%08x [3]:0x%08x\n",
  2843. sig0 & HW_PRTY_ASSERT_SET_0,
  2844. sig1 & HW_PRTY_ASSERT_SET_1,
  2845. sig2 & HW_PRTY_ASSERT_SET_2,
  2846. sig3 & HW_PRTY_ASSERT_SET_3);
  2847. printk(KERN_ERR"%s: Parity errors detected in blocks: ",
  2848. bp->dev->name);
  2849. par_num = bnx2x_print_blocks_with_parity0(
  2850. sig0 & HW_PRTY_ASSERT_SET_0, par_num);
  2851. par_num = bnx2x_print_blocks_with_parity1(
  2852. sig1 & HW_PRTY_ASSERT_SET_1, par_num);
  2853. par_num = bnx2x_print_blocks_with_parity2(
  2854. sig2 & HW_PRTY_ASSERT_SET_2, par_num);
  2855. par_num = bnx2x_print_blocks_with_parity3(
  2856. sig3 & HW_PRTY_ASSERT_SET_3, par_num);
  2857. printk("\n");
  2858. return true;
  2859. } else
  2860. return false;
  2861. }
  2862. bool bnx2x_chk_parity_attn(struct bnx2x *bp)
  2863. {
  2864. struct attn_route attn;
  2865. int port = BP_PORT(bp);
  2866. attn.sig[0] = REG_RD(bp,
  2867. MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
  2868. port*4);
  2869. attn.sig[1] = REG_RD(bp,
  2870. MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
  2871. port*4);
  2872. attn.sig[2] = REG_RD(bp,
  2873. MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
  2874. port*4);
  2875. attn.sig[3] = REG_RD(bp,
  2876. MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
  2877. port*4);
  2878. return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
  2879. attn.sig[3]);
  2880. }
  2881. static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
  2882. {
  2883. u32 val;
  2884. if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
  2885. val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
  2886. BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
  2887. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
  2888. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2889. "ADDRESS_ERROR\n");
  2890. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
  2891. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2892. "INCORRECT_RCV_BEHAVIOR\n");
  2893. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
  2894. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2895. "WAS_ERROR_ATTN\n");
  2896. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
  2897. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2898. "VF_LENGTH_VIOLATION_ATTN\n");
  2899. if (val &
  2900. PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
  2901. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2902. "VF_GRC_SPACE_VIOLATION_ATTN\n");
  2903. if (val &
  2904. PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
  2905. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2906. "VF_MSIX_BAR_VIOLATION_ATTN\n");
  2907. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
  2908. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2909. "TCPL_ERROR_ATTN\n");
  2910. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
  2911. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2912. "TCPL_IN_TWO_RCBS_ATTN\n");
  2913. if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
  2914. BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
  2915. "CSSNOOP_FIFO_OVERFLOW\n");
  2916. }
  2917. if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
  2918. val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
  2919. BNX2X_ERR("ATC hw attention 0x%x\n", val);
  2920. if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
  2921. BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
  2922. if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
  2923. BNX2X_ERR("ATC_ATC_INT_STS_REG"
  2924. "_ATC_TCPL_TO_NOT_PEND\n");
  2925. if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
  2926. BNX2X_ERR("ATC_ATC_INT_STS_REG_"
  2927. "ATC_GPA_MULTIPLE_HITS\n");
  2928. if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
  2929. BNX2X_ERR("ATC_ATC_INT_STS_REG_"
  2930. "ATC_RCPL_TO_EMPTY_CNT\n");
  2931. if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
  2932. BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
  2933. if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
  2934. BNX2X_ERR("ATC_ATC_INT_STS_REG_"
  2935. "ATC_IREQ_LESS_THAN_STU\n");
  2936. }
  2937. if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
  2938. AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
  2939. BNX2X_ERR("FATAL parity attention set4 0x%x\n",
  2940. (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
  2941. AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
  2942. }
  2943. }
  2944. static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
  2945. {
  2946. struct attn_route attn, *group_mask;
  2947. int port = BP_PORT(bp);
  2948. int index;
  2949. u32 reg_addr;
  2950. u32 val;
  2951. u32 aeu_mask;
  2952. /* need to take HW lock because MCP or other port might also
  2953. try to handle this event */
  2954. bnx2x_acquire_alr(bp);
  2955. if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
  2956. bp->recovery_state = BNX2X_RECOVERY_INIT;
  2957. bnx2x_set_reset_in_progress(bp);
  2958. schedule_delayed_work(&bp->reset_task, 0);
  2959. /* Disable HW interrupts */
  2960. bnx2x_int_disable(bp);
  2961. bnx2x_release_alr(bp);
  2962. /* In case of parity errors don't handle attentions so that
2963. * the other function would also "see" the parity errors.
  2964. */
  2965. return;
  2966. }
  2967. attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
  2968. attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
  2969. attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
  2970. attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
  2971. if (CHIP_IS_E2(bp))
  2972. attn.sig[4] =
  2973. REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
  2974. else
  2975. attn.sig[4] = 0;
  2976. DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
  2977. attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
  2978. for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
  2979. if (deasserted & (1 << index)) {
  2980. group_mask = &bp->attn_group[index];
  2981. DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
  2982. "%08x %08x %08x\n",
  2983. index,
  2984. group_mask->sig[0], group_mask->sig[1],
  2985. group_mask->sig[2], group_mask->sig[3],
  2986. group_mask->sig[4]);
  2987. bnx2x_attn_int_deasserted4(bp,
  2988. attn.sig[4] & group_mask->sig[4]);
  2989. bnx2x_attn_int_deasserted3(bp,
  2990. attn.sig[3] & group_mask->sig[3]);
  2991. bnx2x_attn_int_deasserted1(bp,
  2992. attn.sig[1] & group_mask->sig[1]);
  2993. bnx2x_attn_int_deasserted2(bp,
  2994. attn.sig[2] & group_mask->sig[2]);
  2995. bnx2x_attn_int_deasserted0(bp,
  2996. attn.sig[0] & group_mask->sig[0]);
  2997. }
  2998. }
  2999. bnx2x_release_alr(bp);
  3000. if (bp->common.int_block == INT_BLOCK_HC)
  3001. reg_addr = (HC_REG_COMMAND_REG + port*32 +
  3002. COMMAND_REG_ATTN_BITS_CLR);
  3003. else
  3004. reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
  3005. val = ~deasserted;
  3006. DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
  3007. (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
  3008. REG_WR(bp, reg_addr, val);
  3009. if (~bp->attn_state & deasserted)
  3010. BNX2X_ERR("IGU ERROR\n");
  3011. reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  3012. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  3013. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  3014. aeu_mask = REG_RD(bp, reg_addr);
  3015. DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
  3016. aeu_mask, deasserted);
  3017. aeu_mask |= (deasserted & 0x3ff);
  3018. DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
  3019. REG_WR(bp, reg_addr, aeu_mask);
  3020. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
  3021. DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
  3022. bp->attn_state &= ~deasserted;
  3023. DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
  3024. }
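/* Derive the asserted and deasserted attention bits from the attention
 * status block and dispatch them to the handlers above.
 */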
  3025. static void bnx2x_attn_int(struct bnx2x *bp)
  3026. {
  3027. /* read local copy of bits */
  3028. u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
  3029. attn_bits);
  3030. u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
  3031. attn_bits_ack);
  3032. u32 attn_state = bp->attn_state;
  3033. /* look for changed bits */
  3034. u32 asserted = attn_bits & ~attn_ack & ~attn_state;
  3035. u32 deasserted = ~attn_bits & attn_ack & attn_state;
  3036. DP(NETIF_MSG_HW,
  3037. "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
  3038. attn_bits, attn_ack, asserted, deasserted);
  3039. if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
  3040. BNX2X_ERR("BAD attention state\n");
  3041. /* handle bits that were raised */
  3042. if (asserted)
  3043. bnx2x_attn_int_asserted(bp, asserted);
  3044. if (deasserted)
  3045. bnx2x_attn_int_deasserted(bp, deasserted);
  3046. }
  3047. static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
  3048. {
  3049. /* No memory barriers */
  3050. storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
  3051. mmiowb(); /* keep prod updates ordered */
  3052. }
  3053. #ifdef BCM_CNIC
  3054. static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
  3055. union event_ring_elem *elem)
  3056. {
  3057. if (!bp->cnic_eth_dev.starting_cid ||
  3058. (cid < bp->cnic_eth_dev.starting_cid &&
  3059. cid != bp->cnic_eth_dev.iscsi_l2_cid))
  3060. return 1;
  3061. DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
  3062. if (unlikely(elem->message.data.cfc_del_event.error)) {
  3063. BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
  3064. cid);
  3065. bnx2x_panic_dump(bp);
  3066. }
  3067. bnx2x_cnic_cfc_comp(bp, cid);
  3068. return 0;
  3069. }
  3070. #endif
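/* Process the event queue: consume slow-path completions (statistics,
 * CFC delete, function start/stop, set-MAC, DCBX traffic start/stop) and
 * advance the EQ producer.
 */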
  3071. static void bnx2x_eq_int(struct bnx2x *bp)
  3072. {
  3073. u16 hw_cons, sw_cons, sw_prod;
  3074. union event_ring_elem *elem;
  3075. u32 cid;
  3076. u8 opcode;
  3077. int spqe_cnt = 0;
  3078. hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3079. /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3080. * When we reach the next-page element we need to adjust so the loop
3081. * condition below will be met. The next element is the size of a
3082. * regular element and hence we increment by 1
3083. */
  3084. if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
  3085. hw_cons++;
  3086. /* This function may never run in parallel with itself for a
3087. * specific bp, thus there is no need for a "paired" read memory
  3088. * barrier here.
  3089. */
  3090. sw_cons = bp->eq_cons;
  3091. sw_prod = bp->eq_prod;
  3092. DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
  3093. hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
  3094. for (; sw_cons != hw_cons;
  3095. sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
  3096. elem = &bp->eq_ring[EQ_DESC(sw_cons)];
  3097. cid = SW_CID(elem->message.data.cfc_del_event.cid);
  3098. opcode = elem->message.opcode;
  3099. /* handle eq element */
  3100. switch (opcode) {
  3101. case EVENT_RING_OPCODE_STAT_QUERY:
  3102. DP(NETIF_MSG_TIMER, "got statistics comp event\n");
  3103. /* nothing to do with stats comp */
  3104. continue;
  3105. case EVENT_RING_OPCODE_CFC_DEL:
  3106. /* handle according to cid range */
  3107. /*
  3108. * we may want to verify here that the bp state is
  3109. * HALTING
  3110. */
  3111. DP(NETIF_MSG_IFDOWN,
  3112. "got delete ramrod for MULTI[%d]\n", cid);
  3113. #ifdef BCM_CNIC
  3114. if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
  3115. goto next_spqe;
  3116. if (cid == BNX2X_FCOE_ETH_CID)
  3117. bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
  3118. else
  3119. #endif
  3120. bnx2x_fp(bp, cid, state) =
  3121. BNX2X_FP_STATE_CLOSED;
  3122. goto next_spqe;
  3123. case EVENT_RING_OPCODE_STOP_TRAFFIC:
  3124. DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
  3125. bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
  3126. goto next_spqe;
  3127. case EVENT_RING_OPCODE_START_TRAFFIC:
  3128. DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
  3129. bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
  3130. goto next_spqe;
  3131. }
  3132. switch (opcode | bp->state) {
  3133. case (EVENT_RING_OPCODE_FUNCTION_START |
  3134. BNX2X_STATE_OPENING_WAIT4_PORT):
  3135. DP(NETIF_MSG_IFUP, "got setup ramrod\n");
  3136. bp->state = BNX2X_STATE_FUNC_STARTED;
  3137. break;
  3138. case (EVENT_RING_OPCODE_FUNCTION_STOP |
  3139. BNX2X_STATE_CLOSING_WAIT4_HALT):
  3140. DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
  3141. bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
  3142. break;
  3143. case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
  3144. case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
  3145. DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
  3146. if (elem->message.data.set_mac_event.echo)
  3147. bp->set_mac_pending = 0;
  3148. break;
  3149. case (EVENT_RING_OPCODE_SET_MAC |
  3150. BNX2X_STATE_CLOSING_WAIT4_HALT):
  3151. DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
  3152. if (elem->message.data.set_mac_event.echo)
  3153. bp->set_mac_pending = 0;
  3154. break;
  3155. default:
3156. /* unknown event - log an error and continue */
  3157. BNX2X_ERR("Unknown EQ event %d\n",
  3158. elem->message.opcode);
  3159. }
  3160. next_spqe:
  3161. spqe_cnt++;
  3162. } /* for */
  3163. smp_mb__before_atomic_inc();
  3164. atomic_add(spqe_cnt, &bp->eq_spq_left);
  3165. bp->eq_cons = sw_cons;
  3166. bp->eq_prod = sw_prod;
  3167. /* Make sure that above mem writes were issued towards the memory */
  3168. smp_wmb();
  3169. /* update producer */
  3170. bnx2x_update_eq_prod(bp, bp->eq_prod);
  3171. }
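/* Slow-path work handler: run attention handling and EQ processing for the
 * indices that changed, then re-enable the default status block interrupt.
 */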
  3172. static void bnx2x_sp_task(struct work_struct *work)
  3173. {
  3174. struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
  3175. u16 status;
  3176. /* Return here if interrupt is disabled */
  3177. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  3178. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  3179. return;
  3180. }
  3181. status = bnx2x_update_dsb_idx(bp);
  3182. /* if (status == 0) */
  3183. /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
  3184. DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
  3185. /* HW attentions */
  3186. if (status & BNX2X_DEF_SB_ATT_IDX) {
  3187. bnx2x_attn_int(bp);
  3188. status &= ~BNX2X_DEF_SB_ATT_IDX;
  3189. }
  3190. /* SP events: STAT_QUERY and others */
  3191. if (status & BNX2X_DEF_SB_IDX) {
  3192. #ifdef BCM_CNIC
  3193. struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
  3194. if ((!NO_FCOE(bp)) &&
  3195. (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
  3196. napi_schedule(&bnx2x_fcoe(bp, napi));
  3197. #endif
  3198. /* Handle EQ completions */
  3199. bnx2x_eq_int(bp);
  3200. bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
  3201. le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
  3202. status &= ~BNX2X_DEF_SB_IDX;
  3203. }
  3204. if (unlikely(status))
  3205. DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
  3206. status);
  3207. bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
  3208. le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
  3209. }
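/* MSI-X slow-path interrupt handler: ack and disable the default SB
 * interrupt, let CNIC handle its events, then schedule the slow-path task.
 */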
  3210. irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
  3211. {
  3212. struct net_device *dev = dev_instance;
  3213. struct bnx2x *bp = netdev_priv(dev);
  3214. /* Return here if interrupt is disabled */
  3215. if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
  3216. DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
  3217. return IRQ_HANDLED;
  3218. }
  3219. bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
  3220. IGU_INT_DISABLE, 0);
  3221. #ifdef BNX2X_STOP_ON_ERROR
  3222. if (unlikely(bp->panic))
  3223. return IRQ_HANDLED;
  3224. #endif
  3225. #ifdef BCM_CNIC
  3226. {
  3227. struct cnic_ops *c_ops;
  3228. rcu_read_lock();
  3229. c_ops = rcu_dereference(bp->cnic_ops);
  3230. if (c_ops)
  3231. c_ops->cnic_handler(bp->cnic_data, NULL);
  3232. rcu_read_unlock();
  3233. }
  3234. #endif
  3235. queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
  3236. return IRQ_HANDLED;
  3237. }
  3238. /* end of slow path */
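/* Periodic timer: maintain the driver/MCP heartbeat pulse, optionally poll
 * the first queue, and trigger a statistics update while the device is open.
 */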
  3239. static void bnx2x_timer(unsigned long data)
  3240. {
  3241. struct bnx2x *bp = (struct bnx2x *) data;
  3242. if (!netif_running(bp->dev))
  3243. return;
  3244. if (atomic_read(&bp->intr_sem) != 0)
  3245. goto timer_restart;
  3246. if (poll) {
  3247. struct bnx2x_fastpath *fp = &bp->fp[0];
  3248. bnx2x_tx_int(fp);
  3249. bnx2x_rx_int(fp, 1000);
  3250. }
  3251. if (!BP_NOMCP(bp)) {
  3252. int mb_idx = BP_FW_MB_IDX(bp);
  3253. u32 drv_pulse;
  3254. u32 mcp_pulse;
  3255. ++bp->fw_drv_pulse_wr_seq;
  3256. bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
  3257. /* TBD - add SYSTEM_TIME */
  3258. drv_pulse = bp->fw_drv_pulse_wr_seq;
  3259. SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
  3260. mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
  3261. MCP_PULSE_SEQ_MASK);
  3262. /* The delta between driver pulse and mcp response
  3263. * should be 1 (before mcp response) or 0 (after mcp response)
  3264. */
  3265. if ((drv_pulse != mcp_pulse) &&
  3266. (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
  3267. /* someone lost a heartbeat... */
  3268. BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
  3269. drv_pulse, mcp_pulse);
  3270. }
  3271. }
  3272. if (bp->state == BNX2X_STATE_OPEN)
  3273. bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
  3274. timer_restart:
  3275. mod_timer(&bp->timer, jiffies + bp->current_interval);
  3276. }
  3277. /* end of Statistics */
  3278. /* nic init */
  3279. /*
  3280. * nic init service functions
  3281. */
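/* Fill a region of chip memory with a constant value, using dword writes
 * when both the address and the length are dword aligned.
 */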
  3282. static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
  3283. {
  3284. u32 i;
  3285. if (!(len%4) && !(addr%4))
  3286. for (i = 0; i < len; i += 4)
  3287. REG_WR(bp, addr + i, fill);
  3288. else
  3289. for (i = 0; i < len; i++)
  3290. REG_WR8(bp, addr + i, fill);
  3291. }
  3292. /* helper: writes FP SP data to FW - data_size in dwords */
  3293. static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
  3294. int fw_sb_id,
  3295. u32 *sb_data_p,
  3296. u32 data_size)
  3297. {
  3298. int index;
  3299. for (index = 0; index < data_size; index++)
  3300. REG_WR(bp, BAR_CSTRORM_INTMEM +
  3301. CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
  3302. sizeof(u32)*index,
  3303. *(sb_data_p + index));
  3304. }
  3305. static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
  3306. {
  3307. u32 *sb_data_p;
  3308. u32 data_size = 0;
  3309. struct hc_status_block_data_e2 sb_data_e2;
  3310. struct hc_status_block_data_e1x sb_data_e1x;
  3311. /* disable the function first */
  3312. if (CHIP_IS_E2(bp)) {
  3313. memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
  3314. sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
  3315. sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
  3316. sb_data_e2.common.p_func.vf_valid = false;
  3317. sb_data_p = (u32 *)&sb_data_e2;
  3318. data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
  3319. } else {
  3320. memset(&sb_data_e1x, 0,
  3321. sizeof(struct hc_status_block_data_e1x));
  3322. sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
  3323. sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
  3324. sb_data_e1x.common.p_func.vf_valid = false;
  3325. sb_data_p = (u32 *)&sb_data_e1x;
  3326. data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
  3327. }
  3328. bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
  3329. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3330. CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
  3331. CSTORM_STATUS_BLOCK_SIZE);
  3332. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3333. CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
  3334. CSTORM_SYNC_BLOCK_SIZE);
  3335. }
  3336. /* helper: writes SP SB data to FW */
  3337. static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
  3338. struct hc_sp_status_block_data *sp_sb_data)
  3339. {
  3340. int func = BP_FUNC(bp);
  3341. int i;
  3342. for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
  3343. REG_WR(bp, BAR_CSTRORM_INTMEM +
  3344. CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
  3345. i*sizeof(u32),
  3346. *((u32 *)sp_sb_data + i));
  3347. }
  3348. static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
  3349. {
  3350. int func = BP_FUNC(bp);
  3351. struct hc_sp_status_block_data sp_sb_data;
  3352. memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
  3353. sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
  3354. sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
  3355. sp_sb_data.p_func.vf_valid = false;
  3356. bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
  3357. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3358. CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
  3359. CSTORM_SP_STATUS_BLOCK_SIZE);
  3360. bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
  3361. CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
  3362. CSTORM_SP_SYNC_BLOCK_SIZE);
  3363. }
  3364. static inline
  3365. void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
  3366. int igu_sb_id, int igu_seg_id)
  3367. {
  3368. hc_sm->igu_sb_id = igu_sb_id;
  3369. hc_sm->igu_seg_id = igu_seg_id;
  3370. hc_sm->timer_value = 0xFF;
  3371. hc_sm->time_to_expire = 0xFFFFFFFF;
  3372. }
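/* Initialize a fastpath status block: fill the CSTORM status block data
 * (E1x or E2 layout) and set up the Rx and Tx index state machines.
 */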
  3373. static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
  3374. u8 vf_valid, int fw_sb_id, int igu_sb_id)
  3375. {
  3376. int igu_seg_id;
  3377. struct hc_status_block_data_e2 sb_data_e2;
  3378. struct hc_status_block_data_e1x sb_data_e1x;
  3379. struct hc_status_block_sm *hc_sm_p;
  3380. int data_size;
  3381. u32 *sb_data_p;
  3382. if (CHIP_INT_MODE_IS_BC(bp))
  3383. igu_seg_id = HC_SEG_ACCESS_NORM;
  3384. else
  3385. igu_seg_id = IGU_SEG_ACCESS_NORM;
  3386. bnx2x_zero_fp_sb(bp, fw_sb_id);
  3387. if (CHIP_IS_E2(bp)) {
  3388. memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
  3389. sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
  3390. sb_data_e2.common.p_func.vf_id = vfid;
  3391. sb_data_e2.common.p_func.vf_valid = vf_valid;
  3392. sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
  3393. sb_data_e2.common.same_igu_sb_1b = true;
  3394. sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
  3395. sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
  3396. hc_sm_p = sb_data_e2.common.state_machine;
  3397. sb_data_p = (u32 *)&sb_data_e2;
  3398. data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
  3399. } else {
  3400. memset(&sb_data_e1x, 0,
  3401. sizeof(struct hc_status_block_data_e1x));
  3402. sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
  3403. sb_data_e1x.common.p_func.vf_id = 0xff;
  3404. sb_data_e1x.common.p_func.vf_valid = false;
  3405. sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
  3406. sb_data_e1x.common.same_igu_sb_1b = true;
  3407. sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
  3408. sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
  3409. hc_sm_p = sb_data_e1x.common.state_machine;
  3410. sb_data_p = (u32 *)&sb_data_e1x;
  3411. data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
  3412. }
  3413. bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
  3414. igu_sb_id, igu_seg_id);
  3415. bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
  3416. igu_sb_id, igu_seg_id);
  3417. DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3418. /* write indices to HW */
  3419. bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
  3420. }
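/*
 * Coalescing is programmed per SB index in HC timeout ticks:
 * ticks = usec / BNX2X_BTR (BNX2X_BTR microseconds per tick).
 * An index is disabled either explicitly or when usec == 0.
 */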
  3421. static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
  3422. u8 sb_index, u8 disable, u16 usec)
  3423. {
  3424. int port = BP_PORT(bp);
  3425. u8 ticks = usec / BNX2X_BTR;
  3426. storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
  3427. disable = disable ? 1 : (usec ? 0 : 1);
  3428. storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
  3429. }
  3430. static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
  3431. u16 tx_usec, u16 rx_usec)
  3432. {
  3433. bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
  3434. false, rx_usec);
  3435. bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
  3436. false, tx_usec);
  3437. }
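/*
 * Initialize the default (slow-path) status block: latch the AEU
 * attention group masks, program the attention message address in
 * the HC or IGU, and write the SP SB data to CSTORM.
 */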
  3438. static void bnx2x_init_def_sb(struct bnx2x *bp)
  3439. {
  3440. struct host_sp_status_block *def_sb = bp->def_status_blk;
  3441. dma_addr_t mapping = bp->def_status_blk_mapping;
  3442. int igu_sp_sb_index;
  3443. int igu_seg_id;
  3444. int port = BP_PORT(bp);
  3445. int func = BP_FUNC(bp);
  3446. int reg_offset, reg_offset_en5;
  3447. u64 section;
  3448. int index;
  3449. struct hc_sp_status_block_data sp_sb_data;
  3450. memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
  3451. if (CHIP_INT_MODE_IS_BC(bp)) {
  3452. igu_sp_sb_index = DEF_SB_IGU_ID;
  3453. igu_seg_id = HC_SEG_ACCESS_DEF;
  3454. } else {
  3455. igu_sp_sb_index = bp->igu_dsb_id;
  3456. igu_seg_id = IGU_SEG_ACCESS_DEF;
  3457. }
  3458. /* ATTN */
  3459. section = ((u64)mapping) + offsetof(struct host_sp_status_block,
  3460. atten_status_block);
  3461. def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
  3462. bp->attn_state = 0;
  3463. reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  3464. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  3465. reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
  3466. MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
  3467. for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
  3468. int sindex;
  3469. /* take care of sig[0]..sig[4] */
  3470. for (sindex = 0; sindex < 4; sindex++)
  3471. bp->attn_group[index].sig[sindex] =
  3472. REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
  3473. if (CHIP_IS_E2(bp))
  3474. /*
  3475. * enable5 is separate from the rest of the registers,
  3476. * and therefore the address skip is 4
  3477. * and not 16 between the different groups
  3478. */
  3479. bp->attn_group[index].sig[4] = REG_RD(bp,
  3480. reg_offset_en5 + 0x4*index);
  3481. else
  3482. bp->attn_group[index].sig[4] = 0;
  3483. }
  3484. if (bp->common.int_block == INT_BLOCK_HC) {
  3485. reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
  3486. HC_REG_ATTN_MSG0_ADDR_L);
  3487. REG_WR(bp, reg_offset, U64_LO(section));
  3488. REG_WR(bp, reg_offset + 4, U64_HI(section));
  3489. } else if (CHIP_IS_E2(bp)) {
  3490. REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
  3491. REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
  3492. }
  3493. section = ((u64)mapping) + offsetof(struct host_sp_status_block,
  3494. sp_sb);
  3495. bnx2x_zero_sp_sb(bp);
  3496. sp_sb_data.host_sb_addr.lo = U64_LO(section);
  3497. sp_sb_data.host_sb_addr.hi = U64_HI(section);
  3498. sp_sb_data.igu_sb_id = igu_sp_sb_index;
  3499. sp_sb_data.igu_seg_id = igu_seg_id;
  3500. sp_sb_data.p_func.pf_id = func;
  3501. sp_sb_data.p_func.vnic_id = BP_VN(bp);
  3502. sp_sb_data.p_func.vf_id = 0xff;
  3503. bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
  3504. bp->stats_pending = 0;
  3505. bp->set_mac_pending = 0;
  3506. bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
  3507. }
  3508. void bnx2x_update_coalesce(struct bnx2x *bp)
  3509. {
  3510. int i;
  3511. for_each_eth_queue(bp, i)
  3512. bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
  3513. bp->tx_ticks, bp->rx_ticks);
  3514. }
  3515. static void bnx2x_init_sp_ring(struct bnx2x *bp)
  3516. {
  3517. spin_lock_init(&bp->spq_lock);
  3518. atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
  3519. bp->spq_prod_idx = 0;
  3520. bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
  3521. bp->spq_prod_bd = bp->spq;
  3522. bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
  3523. }
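/*
 * Set up the event queue ring: the last element of each page is a
 * next-page pointer chaining to the following page, wrapping back
 * to the first page after NUM_EQ_PAGES.
 */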
  3524. static void bnx2x_init_eq_ring(struct bnx2x *bp)
  3525. {
  3526. int i;
  3527. for (i = 1; i <= NUM_EQ_PAGES; i++) {
  3528. union event_ring_elem *elem =
  3529. &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
  3530. elem->next_page.addr.hi =
  3531. cpu_to_le32(U64_HI(bp->eq_mapping +
  3532. BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
  3533. elem->next_page.addr.lo =
  3534. cpu_to_le32(U64_LO(bp->eq_mapping +
  3535. BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
  3536. }
  3537. bp->eq_cons = 0;
  3538. bp->eq_prod = NUM_EQ_DESC;
  3539. bp->eq_cons_sb = BNX2X_EQ_INDEX;
3540. /* we want a warning message before it gets rough... */
  3541. atomic_set(&bp->eq_spq_left,
  3542. min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
  3543. }
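/*
 * Write the RSS indirection table to TSTORM internal memory, one
 * byte per entry: the leading client id plus the queue index the
 * entry maps to.
 */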
  3544. void bnx2x_push_indir_table(struct bnx2x *bp)
  3545. {
  3546. int func = BP_FUNC(bp);
  3547. int i;
  3548. if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
  3549. return;
  3550. for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
  3551. REG_WR8(bp, BAR_TSTRORM_INTMEM +
  3552. TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
  3553. bp->fp->cl_id + bp->rx_indir_table[i]);
  3554. }
  3555. static void bnx2x_init_ind_table(struct bnx2x *bp)
  3556. {
  3557. int i;
  3558. for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
  3559. bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
  3560. bnx2x_push_indir_table(bp);
  3561. }
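/*
 * Translate bp->rx_mode into default-queue MAC filters and a NIG
 * LLH drop mask. When FCoE is active, its L2 client gets its own
 * more restrictive filter so frames are not duplicated to it.
 */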
  3562. void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
  3563. {
  3564. int mode = bp->rx_mode;
  3565. int port = BP_PORT(bp);
  3566. u16 cl_id;
  3567. u32 def_q_filters = 0;
  3568. /* All but management unicast packets should pass to the host as well */
  3569. u32 llh_mask =
  3570. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
  3571. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
  3572. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
  3573. NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
  3574. switch (mode) {
  3575. case BNX2X_RX_MODE_NONE: /* no Rx */
  3576. def_q_filters = BNX2X_ACCEPT_NONE;
  3577. #ifdef BCM_CNIC
  3578. if (!NO_FCOE(bp)) {
  3579. cl_id = bnx2x_fcoe(bp, cl_id);
  3580. bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
  3581. }
  3582. #endif
  3583. break;
  3584. case BNX2X_RX_MODE_NORMAL:
  3585. def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
  3586. BNX2X_ACCEPT_MULTICAST;
  3587. #ifdef BCM_CNIC
  3588. if (!NO_FCOE(bp)) {
  3589. cl_id = bnx2x_fcoe(bp, cl_id);
  3590. bnx2x_rxq_set_mac_filters(bp, cl_id,
  3591. BNX2X_ACCEPT_UNICAST |
  3592. BNX2X_ACCEPT_MULTICAST);
  3593. }
  3594. #endif
  3595. break;
  3596. case BNX2X_RX_MODE_ALLMULTI:
  3597. def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
  3598. BNX2X_ACCEPT_ALL_MULTICAST;
  3599. #ifdef BCM_CNIC
  3600. /*
3601. * Prevent duplication of multicast packets by configuring the FCoE
  3602. * L2 Client to receive only matched unicast frames.
  3603. */
  3604. if (!NO_FCOE(bp)) {
  3605. cl_id = bnx2x_fcoe(bp, cl_id);
  3606. bnx2x_rxq_set_mac_filters(bp, cl_id,
  3607. BNX2X_ACCEPT_UNICAST);
  3608. }
  3609. #endif
  3610. break;
  3611. case BNX2X_RX_MODE_PROMISC:
  3612. def_q_filters |= BNX2X_PROMISCUOUS_MODE;
  3613. #ifdef BCM_CNIC
  3614. /*
3615. * Prevent packet duplication by configuring DROP_ALL for the FCoE
  3616. * L2 Client.
  3617. */
  3618. if (!NO_FCOE(bp)) {
  3619. cl_id = bnx2x_fcoe(bp, cl_id);
  3620. bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
  3621. }
  3622. #endif
  3623. /* pass management unicast packets as well */
  3624. llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
  3625. break;
  3626. default:
  3627. BNX2X_ERR("BAD rx mode (%d)\n", mode);
  3628. break;
  3629. }
  3630. cl_id = BP_L_ID(bp);
  3631. bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
  3632. REG_WR(bp,
  3633. (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
  3634. NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
  3635. DP(NETIF_MSG_IFUP, "rx mode %d\n"
  3636. "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
  3637. "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
  3638. "unmatched_ucast 0x%x\n", mode,
  3639. bp->mac_filters.ucast_drop_all,
  3640. bp->mac_filters.mcast_drop_all,
  3641. bp->mac_filters.bcast_drop_all,
  3642. bp->mac_filters.ucast_accept_all,
  3643. bp->mac_filters.mcast_accept_all,
  3644. bp->mac_filters.bcast_accept_all,
  3645. bp->mac_filters.unmatched_unicast
  3646. );
  3647. storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
  3648. }
  3649. static void bnx2x_init_internal_common(struct bnx2x *bp)
  3650. {
  3651. int i;
  3652. if (!CHIP_IS_E1(bp)) {
  3653. /* xstorm needs to know whether to add ovlan to packets or not,
3654. * in switch-independent mode we write 0 here... */
  3655. REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
  3656. bp->mf_mode);
  3657. REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
  3658. bp->mf_mode);
  3659. REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
  3660. bp->mf_mode);
  3661. REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
  3662. bp->mf_mode);
  3663. }
  3664. if (IS_MF_SI(bp))
  3665. /*
  3666. * In switch independent mode, the TSTORM needs to accept
  3667. * packets that failed classification, since approximate match
  3668. * mac addresses aren't written to NIG LLH
  3669. */
  3670. REG_WR8(bp, BAR_TSTRORM_INTMEM +
  3671. TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
  3672. /* Zero this manually as its initialization is
  3673. currently missing in the initTool */
  3674. for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
  3675. REG_WR(bp, BAR_USTRORM_INTMEM +
  3676. USTORM_AGG_DATA_OFFSET + i * 4, 0);
  3677. if (CHIP_IS_E2(bp)) {
  3678. REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
  3679. CHIP_INT_MODE_IS_BC(bp) ?
  3680. HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
  3681. }
  3682. }
  3683. static void bnx2x_init_internal_port(struct bnx2x *bp)
  3684. {
  3685. /* port */
  3686. bnx2x_dcb_init_intmem_pfc(bp);
  3687. }
  3688. static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
  3689. {
  3690. switch (load_code) {
  3691. case FW_MSG_CODE_DRV_LOAD_COMMON:
  3692. case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
  3693. bnx2x_init_internal_common(bp);
  3694. /* no break */
  3695. case FW_MSG_CODE_DRV_LOAD_PORT:
  3696. bnx2x_init_internal_port(bp);
  3697. /* no break */
  3698. case FW_MSG_CODE_DRV_LOAD_FUNCTION:
  3699. /* internal memory per function is
  3700. initialized inside bnx2x_pf_init */
  3701. break;
  3702. default:
  3703. BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
  3704. break;
  3705. }
  3706. }
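/*
 * Per-queue SB init: derive the client, FW SB and IGU SB ids from
 * the base ids, compute the qZone id and the ustorm RX producers
 * shortcut, then initialize the queue's status block.
 */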
  3707. static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
  3708. {
  3709. struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
  3710. fp->state = BNX2X_FP_STATE_CLOSED;
  3711. fp->cid = fp_idx;
  3712. fp->cl_id = BP_L_ID(bp) + fp_idx;
  3713. fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
  3714. fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
3715. /* qZone id equals the FW (per path) client id */
  3716. fp->cl_qzone_id = fp->cl_id +
  3717. BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
  3718. ETH_MAX_RX_CLIENTS_E1H);
  3719. /* init shortcut */
  3720. fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
  3721. USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
  3722. USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
3723. /* Setup SB indices */
  3724. fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
  3725. fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
  3726. DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
  3727. "cl_id %d fw_sb %d igu_sb %d\n",
  3728. fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
  3729. fp->igu_sb_id);
  3730. bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
  3731. fp->fw_sb_id, fp->igu_sb_id);
  3732. bnx2x_update_fpsb_idx(fp);
  3733. }
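/*
 * Top-level NIC init: per-queue and default status blocks, the
 * RX/TX/SPQ/EQ rings, internal memories and the indirection table,
 * and finally interrupt enabling.
 */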
  3734. void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
  3735. {
  3736. int i;
  3737. for_each_eth_queue(bp, i)
  3738. bnx2x_init_fp_sb(bp, i);
  3739. #ifdef BCM_CNIC
  3740. if (!NO_FCOE(bp))
  3741. bnx2x_init_fcoe_fp(bp);
  3742. bnx2x_init_sb(bp, bp->cnic_sb_mapping,
  3743. BNX2X_VF_ID_INVALID, false,
  3744. CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
  3745. #endif
  3746. /* ensure status block indices were read */
  3747. rmb();
  3748. bnx2x_init_def_sb(bp);
  3749. bnx2x_update_dsb_idx(bp);
  3750. bnx2x_init_rx_rings(bp);
  3751. bnx2x_init_tx_rings(bp);
  3752. bnx2x_init_sp_ring(bp);
  3753. bnx2x_init_eq_ring(bp);
  3754. bnx2x_init_internal(bp, load_code);
  3755. bnx2x_pf_init(bp);
  3756. bnx2x_init_ind_table(bp);
  3757. bnx2x_stats_init(bp);
  3758. /* At this point, we are ready for interrupts */
  3759. atomic_set(&bp->intr_sem, 0);
  3760. /* flush all before enabling interrupts */
  3761. mb();
  3762. mmiowb();
  3763. bnx2x_int_enable(bp);
  3764. /* Check for SPIO5 */
  3765. bnx2x_attn_int_deasserted0(bp,
  3766. REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
  3767. AEU_INPUTS_ATTN_BITS_SPIO5);
  3768. }
  3769. /* end of nic init */
  3770. /*
  3771. * gzip service functions
  3772. */
  3773. static int bnx2x_gunzip_init(struct bnx2x *bp)
  3774. {
  3775. bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
  3776. &bp->gunzip_mapping, GFP_KERNEL);
  3777. if (bp->gunzip_buf == NULL)
  3778. goto gunzip_nomem1;
  3779. bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
  3780. if (bp->strm == NULL)
  3781. goto gunzip_nomem2;
  3782. bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
  3783. if (bp->strm->workspace == NULL)
  3784. goto gunzip_nomem3;
  3785. return 0;
  3786. gunzip_nomem3:
  3787. kfree(bp->strm);
  3788. bp->strm = NULL;
  3789. gunzip_nomem2:
  3790. dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
  3791. bp->gunzip_mapping);
  3792. bp->gunzip_buf = NULL;
  3793. gunzip_nomem1:
3794. netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3795. " decompression\n");
  3796. return -ENOMEM;
  3797. }
  3798. static void bnx2x_gunzip_end(struct bnx2x *bp)
  3799. {
  3800. if (bp->strm) {
  3801. vfree(bp->strm->workspace);
  3802. kfree(bp->strm);
  3803. bp->strm = NULL;
  3804. }
  3805. if (bp->gunzip_buf) {
  3806. dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
  3807. bp->gunzip_mapping);
  3808. bp->gunzip_buf = NULL;
  3809. }
  3810. }
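/*
 * Inflate a gzipped image into gunzip_buf: verify the 0x1f 0x8b
 * magic and the deflate method, skip the 10-byte header plus the
 * optional NUL-terminated file name (FNAME flag), then inflate
 * with a raw window (-MAX_WBITS). gunzip_outlen is kept in dwords.
 */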
  3811. static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
  3812. {
  3813. int n, rc;
  3814. /* check gzip header */
  3815. if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
  3816. BNX2X_ERR("Bad gzip header\n");
  3817. return -EINVAL;
  3818. }
  3819. n = 10;
  3820. #define FNAME 0x8
  3821. if (zbuf[3] & FNAME)
  3822. while ((zbuf[n++] != 0) && (n < len));
  3823. bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
  3824. bp->strm->avail_in = len - n;
  3825. bp->strm->next_out = bp->gunzip_buf;
  3826. bp->strm->avail_out = FW_BUF_SIZE;
  3827. rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
  3828. if (rc != Z_OK)
  3829. return rc;
  3830. rc = zlib_inflate(bp->strm, Z_FINISH);
  3831. if ((rc != Z_OK) && (rc != Z_STREAM_END))
  3832. netdev_err(bp->dev, "Firmware decompression error: %s\n",
  3833. bp->strm->msg);
  3834. bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
  3835. if (bp->gunzip_outlen & 0x3)
  3836. netdev_err(bp->dev, "Firmware decompression error:"
  3837. " gunzip_outlen (%d) not aligned\n",
  3838. bp->gunzip_outlen);
  3839. bp->gunzip_outlen >>= 2;
  3840. zlib_inflateEnd(bp->strm);
  3841. if (rc == Z_STREAM_END)
  3842. return 0;
  3843. return rc;
  3844. }
  3845. /* nic load/unload */
  3846. /*
  3847. * General service functions
  3848. */
  3849. /* send a NIG loopback debug packet */
  3850. static void bnx2x_lb_pckt(struct bnx2x *bp)
  3851. {
  3852. u32 wb_write[3];
  3853. /* Ethernet source and destination addresses */
  3854. wb_write[0] = 0x55555555;
  3855. wb_write[1] = 0x55555555;
  3856. wb_write[2] = 0x20; /* SOP */
  3857. REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
  3858. /* NON-IP protocol */
  3859. wb_write[0] = 0x09000000;
  3860. wb_write[1] = 0x55555555;
  3861. wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
  3862. REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
  3863. }
  3864. /* some of the internal memories
3865. * are not directly readable from the driver;
  3866. * to test them we send debug packets
  3867. */
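/*
 * The test zeroes the parser CFC-search credits so packets back up
 * in the BRB, then checks the NIG byte counter and the PRS packet
 * counter at each stage before restoring the credits and
 * re-enabling the parser neighbor inputs.
 */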
  3868. static int bnx2x_int_mem_test(struct bnx2x *bp)
  3869. {
  3870. int factor;
  3871. int count, i;
  3872. u32 val = 0;
  3873. if (CHIP_REV_IS_FPGA(bp))
  3874. factor = 120;
  3875. else if (CHIP_REV_IS_EMUL(bp))
  3876. factor = 200;
  3877. else
  3878. factor = 1;
  3879. /* Disable inputs of parser neighbor blocks */
  3880. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
  3881. REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
  3882. REG_WR(bp, CFC_REG_DEBUG0, 0x1);
  3883. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
  3884. /* Write 0 to parser credits for CFC search request */
  3885. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
  3886. /* send Ethernet packet */
  3887. bnx2x_lb_pckt(bp);
3888. /* TODO: should the NIG statistics be reset here? */
  3889. /* Wait until NIG register shows 1 packet of size 0x10 */
  3890. count = 1000 * factor;
  3891. while (count) {
  3892. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  3893. val = *bnx2x_sp(bp, wb_data[0]);
  3894. if (val == 0x10)
  3895. break;
  3896. msleep(10);
  3897. count--;
  3898. }
  3899. if (val != 0x10) {
  3900. BNX2X_ERR("NIG timeout val = 0x%x\n", val);
  3901. return -1;
  3902. }
  3903. /* Wait until PRS register shows 1 packet */
  3904. count = 1000 * factor;
  3905. while (count) {
  3906. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  3907. if (val == 1)
  3908. break;
  3909. msleep(10);
  3910. count--;
  3911. }
  3912. if (val != 0x1) {
  3913. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  3914. return -2;
  3915. }
  3916. /* Reset and init BRB, PRS */
  3917. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
  3918. msleep(50);
  3919. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
  3920. msleep(50);
  3921. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  3922. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  3923. DP(NETIF_MSG_HW, "part2\n");
  3924. /* Disable inputs of parser neighbor blocks */
  3925. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
  3926. REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
  3927. REG_WR(bp, CFC_REG_DEBUG0, 0x1);
  3928. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
  3929. /* Write 0 to parser credits for CFC search request */
  3930. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
  3931. /* send 10 Ethernet packets */
  3932. for (i = 0; i < 10; i++)
  3933. bnx2x_lb_pckt(bp);
  3934. /* Wait until NIG register shows 10 + 1
  3935. packets of size 11*0x10 = 0xb0 */
  3936. count = 1000 * factor;
  3937. while (count) {
  3938. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  3939. val = *bnx2x_sp(bp, wb_data[0]);
  3940. if (val == 0xb0)
  3941. break;
  3942. msleep(10);
  3943. count--;
  3944. }
  3945. if (val != 0xb0) {
  3946. BNX2X_ERR("NIG timeout val = 0x%x\n", val);
  3947. return -3;
  3948. }
  3949. /* Wait until PRS register shows 2 packets */
  3950. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  3951. if (val != 2)
  3952. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  3953. /* Write 1 to parser credits for CFC search request */
  3954. REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
  3955. /* Wait until PRS register shows 3 packets */
  3956. msleep(10 * factor);
  3957. /* Wait until NIG register shows 1 packet of size 0x10 */
  3958. val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
  3959. if (val != 3)
  3960. BNX2X_ERR("PRS timeout val = 0x%x\n", val);
  3961. /* clear NIG EOP FIFO */
  3962. for (i = 0; i < 11; i++)
  3963. REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
  3964. val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
  3965. if (val != 1) {
  3966. BNX2X_ERR("clear of NIG failed\n");
  3967. return -4;
  3968. }
  3969. /* Reset and init BRB, PRS, NIG */
  3970. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
  3971. msleep(50);
  3972. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
  3973. msleep(50);
  3974. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  3975. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  3976. #ifndef BCM_CNIC
  3977. /* set NIC mode */
  3978. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  3979. #endif
  3980. /* Enable inputs of parser neighbor blocks */
  3981. REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
  3982. REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
  3983. REG_WR(bp, CFC_REG_DEBUG0, 0x0);
  3984. REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
  3985. DP(NETIF_MSG_HW, "done\n");
  3986. return 0; /* OK */
  3987. }
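/*
 * Unmask attention interrupts in the HW blocks. A few sources stay
 * masked on purpose: BRB read-length errors (legal for the parser),
 * PBF bits 3-4, and the SEM/MISC mask writes left commented out.
 */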
  3988. static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
  3989. {
  3990. REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
  3991. if (CHIP_IS_E2(bp))
  3992. REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
  3993. else
  3994. REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
  3995. REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
  3996. REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
  3997. /*
  3998. * mask read length error interrupts in brb for parser
  3999. * (parsing unit and 'checksum and crc' unit)
  4000. * these errors are legal (PU reads fixed length and CAC can cause
  4001. * read length error on truncated packets)
  4002. */
  4003. REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
  4004. REG_WR(bp, QM_REG_QM_INT_MASK, 0);
  4005. REG_WR(bp, TM_REG_TM_INT_MASK, 0);
  4006. REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
  4007. REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
  4008. REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
  4009. /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
  4010. /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
  4011. REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
  4012. REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
  4013. REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
  4014. /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
  4015. /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
  4016. REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
  4017. REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
  4018. REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
  4019. REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
  4020. /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
  4021. /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
  4022. if (CHIP_REV_IS_FPGA(bp))
  4023. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
  4024. else if (CHIP_IS_E2(bp))
  4025. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
  4026. (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
  4027. | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
  4028. | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
  4029. | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
  4030. | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
  4031. else
  4032. REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
  4033. REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
  4034. REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
  4035. REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
  4036. /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
  4037. /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
  4038. REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
  4039. REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
  4040. /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
  4041. REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
  4042. }
  4043. static void bnx2x_reset_common(struct bnx2x *bp)
  4044. {
  4045. /* reset_common */
  4046. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  4047. 0xd3ffff7f);
  4048. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
  4049. }
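/*
 * Derive the PXP read/write request ordering from the PCIe Device
 * Control register (max payload size and max read request size);
 * bp->mrrs, when set, forces the read order instead.
 */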
  4050. static void bnx2x_init_pxp(struct bnx2x *bp)
  4051. {
  4052. u16 devctl;
  4053. int r_order, w_order;
  4054. pci_read_config_word(bp->pdev,
  4055. bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
  4056. DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
  4057. w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
  4058. if (bp->mrrs == -1)
  4059. r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
  4060. else {
  4061. DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
  4062. r_order = bp->mrrs;
  4063. }
  4064. bnx2x_init_pxp_arb(bp, r_order, w_order);
  4065. }
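/*
 * Fan failure is reported through SPIO5; enable the event only if
 * the shared HW config requires it, either explicitly or based on
 * the PHY type of either port.
 */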
  4066. static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
  4067. {
  4068. int is_required;
  4069. u32 val;
  4070. int port;
  4071. if (BP_NOMCP(bp))
  4072. return;
  4073. is_required = 0;
  4074. val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
  4075. SHARED_HW_CFG_FAN_FAILURE_MASK;
  4076. if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
  4077. is_required = 1;
  4078. /*
  4079. * The fan failure mechanism is usually related to the PHY type since
  4080. * the power consumption of the board is affected by the PHY. Currently,
4081. * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
  4082. */
  4083. else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
  4084. for (port = PORT_0; port < PORT_MAX; port++) {
  4085. is_required |=
  4086. bnx2x_fan_failure_det_req(
  4087. bp,
  4088. bp->common.shmem_base,
  4089. bp->common.shmem2_base,
  4090. port);
  4091. }
  4092. DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
  4093. if (is_required == 0)
  4094. return;
  4095. /* Fan failure is indicated by SPIO 5 */
  4096. bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
  4097. MISC_REGISTERS_SPIO_INPUT_HI_Z);
  4098. /* set to active low mode */
  4099. val = REG_RD(bp, MISC_REG_SPIO_INT);
  4100. val |= ((1 << MISC_REGISTERS_SPIO_5) <<
  4101. MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
  4102. REG_WR(bp, MISC_REG_SPIO_INT, val);
  4103. /* enable interrupt to signal the IGU */
  4104. val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
  4105. val |= (1 << MISC_REGISTERS_SPIO_5);
  4106. REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
  4107. }
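/*
 * GRC "pretend": subsequent accesses through this PF's window are
 * issued on behalf of pretend_func_num. Callers restore the normal
 * mapping by pretending to BP_ABS_FUNC() again.
 */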
  4108. static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
  4109. {
  4110. u32 offset = 0;
  4111. if (CHIP_IS_E1(bp))
  4112. return;
  4113. if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
  4114. return;
  4115. switch (BP_ABS_FUNC(bp)) {
  4116. case 0:
  4117. offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
  4118. break;
  4119. case 1:
  4120. offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
  4121. break;
  4122. case 2:
  4123. offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
  4124. break;
  4125. case 3:
  4126. offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
  4127. break;
  4128. case 4:
  4129. offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
  4130. break;
  4131. case 5:
  4132. offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
  4133. break;
  4134. case 6:
  4135. offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
  4136. break;
  4137. case 7:
  4138. offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
  4139. break;
  4140. default:
  4141. return;
  4142. }
  4143. REG_WR(bp, offset, pretend_func_num);
  4144. REG_RD(bp, offset);
  4145. DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
  4146. }
  4147. static void bnx2x_pf_disable(struct bnx2x *bp)
  4148. {
  4149. u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
  4150. val &= ~IGU_PF_CONF_FUNC_EN;
  4151. REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
  4152. REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
  4153. REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
  4154. }
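/*
 * Common (per-path) HW init, run by the first PF loaded on a path:
 * reset the common blocks, run the COMMON init stages, apply the E2
 * timers-bug ILT workaround and, if the MCP is present, bring up
 * the common PHY.
 */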
  4155. static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
  4156. {
  4157. u32 val, i;
  4158. DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
  4159. bnx2x_reset_common(bp);
  4160. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
  4161. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
  4162. bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
  4163. if (!CHIP_IS_E1(bp))
  4164. REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
  4165. if (CHIP_IS_E2(bp)) {
  4166. u8 fid;
  4167. /**
4168. * In 4-port or 2-port mode we need to turn off master-enable
4169. * for everyone; after that, turn it back on for self.
4170. * So, regardless of multi-function or not, we always disable it
4171. * for all functions on the given path, which means 0,2,4,6 for
4172. * path 0 and 1,3,5,7 for path 1.
  4173. */
  4174. for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
  4175. if (fid == BP_ABS_FUNC(bp)) {
  4176. REG_WR(bp,
  4177. PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
  4178. 1);
  4179. continue;
  4180. }
  4181. bnx2x_pretend_func(bp, fid);
  4182. /* clear pf enable */
  4183. bnx2x_pf_disable(bp);
  4184. bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
  4185. }
  4186. }
  4187. bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
  4188. if (CHIP_IS_E1(bp)) {
  4189. /* enable HW interrupt from PXP on USDM overflow
  4190. bit 16 on INT_MASK_0 */
  4191. REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
  4192. }
  4193. bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
  4194. bnx2x_init_pxp(bp);
  4195. #ifdef __BIG_ENDIAN
  4196. REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
  4197. REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
  4198. REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
  4199. REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
  4200. REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
  4201. /* make sure this value is 0 */
  4202. REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
  4203. /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
  4204. REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
  4205. REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
  4206. REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
  4207. REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
  4208. #endif
  4209. bnx2x_ilt_init_page_size(bp, INITOP_SET);
  4210. if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
  4211. REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
4212. /* let the HW do its magic ... */
  4213. msleep(100);
  4214. /* finish PXP init */
  4215. val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
  4216. if (val != 1) {
  4217. BNX2X_ERR("PXP2 CFG failed\n");
  4218. return -EBUSY;
  4219. }
  4220. val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
  4221. if (val != 1) {
  4222. BNX2X_ERR("PXP2 RD_INIT failed\n");
  4223. return -EBUSY;
  4224. }
  4225. /* Timers bug workaround E2 only. We need to set the entire ILT to
  4226. * have entries with value "0" and valid bit on.
  4227. * This needs to be done by the first PF that is loaded in a path
  4228. * (i.e. common phase)
  4229. */
  4230. if (CHIP_IS_E2(bp)) {
  4231. struct ilt_client_info ilt_cli;
  4232. struct bnx2x_ilt ilt;
  4233. memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
  4234. memset(&ilt, 0, sizeof(struct bnx2x_ilt));
  4235. /* initialize dummy TM client */
  4236. ilt_cli.start = 0;
  4237. ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
  4238. ilt_cli.client_num = ILT_CLIENT_TM;
  4239. /* Step 1: set zeroes to all ilt page entries with valid bit on
  4240. * Step 2: set the timers first/last ilt entry to point
  4241. * to the entire range to prevent ILT range error for 3rd/4th
  4242. * vnic (this code assumes existence of the vnic)
  4243. *
  4244. * both steps performed by call to bnx2x_ilt_client_init_op()
  4245. * with dummy TM client
  4246. *
  4247. * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4248. * and its counterpart are split registers
  4249. */
  4250. bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
  4251. bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
  4252. bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
  4253. REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
  4254. REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
  4255. REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
  4256. }
  4257. REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
  4258. REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
  4259. if (CHIP_IS_E2(bp)) {
  4260. int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
  4261. (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
  4262. bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
  4263. bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4264. /* let the HW do its magic ... */
  4265. do {
  4266. msleep(200);
  4267. val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
  4268. } while (factor-- && (val != 1));
  4269. if (val != 1) {
  4270. BNX2X_ERR("ATC_INIT failed\n");
  4271. return -EBUSY;
  4272. }
  4273. }
  4274. bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
  4275. /* clean the DMAE memory */
  4276. bp->dmae_ready = 1;
  4277. bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
  4278. bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
  4279. bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
  4280. bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
  4281. bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
  4282. bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
  4283. bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
  4284. bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
  4285. bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
  4286. bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
  4287. if (CHIP_MODE_IS_4_PORT(bp))
  4288. bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
  4289. /* QM queues pointers table */
  4290. bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
  4291. /* soft reset pulse */
  4292. REG_WR(bp, QM_REG_SOFT_RESET, 1);
  4293. REG_WR(bp, QM_REG_SOFT_RESET, 0);
  4294. #ifdef BCM_CNIC
  4295. bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
  4296. #endif
  4297. bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
  4298. REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
  4299. if (!CHIP_REV_IS_SLOW(bp)) {
  4300. /* enable hw interrupt from doorbell Q */
  4301. REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
  4302. }
  4303. bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
  4304. if (CHIP_MODE_IS_4_PORT(bp)) {
  4305. REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
  4306. REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
  4307. }
  4308. bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
  4309. REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
  4310. #ifndef BCM_CNIC
  4311. /* set NIC mode */
  4312. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  4313. #endif
  4314. if (!CHIP_IS_E1(bp))
  4315. REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
  4316. if (CHIP_IS_E2(bp)) {
  4317. /* Bit-map indicating which L2 hdrs may appear after the
  4318. basic Ethernet header */
  4319. int has_ovlan = IS_MF_SD(bp);
  4320. REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
  4321. REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
  4322. }
  4323. bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
  4324. bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
  4325. bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
  4326. bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
  4327. bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4328. bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4329. bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4330. bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
  4331. bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
  4332. bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
  4333. bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
  4334. bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
  4335. if (CHIP_MODE_IS_4_PORT(bp))
  4336. bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
  4337. /* sync semi rtc */
  4338. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  4339. 0x80000000);
  4340. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
  4341. 0x80000000);
  4342. bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
  4343. bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
  4344. bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
  4345. if (CHIP_IS_E2(bp)) {
  4346. int has_ovlan = IS_MF_SD(bp);
  4347. REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
  4348. REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
  4349. }
  4350. REG_WR(bp, SRC_REG_SOFT_RST, 1);
  4351. for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
  4352. REG_WR(bp, i, random32());
  4353. bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
  4354. #ifdef BCM_CNIC
  4355. REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
  4356. REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
  4357. REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
  4358. REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
  4359. REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
  4360. REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
  4361. REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
  4362. REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
  4363. REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
  4364. REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
  4365. #endif
  4366. REG_WR(bp, SRC_REG_SOFT_RST, 0);
  4367. if (sizeof(union cdu_context) != 1024)
  4368. /* we currently assume that a context is 1024 bytes */
  4369. dev_alert(&bp->pdev->dev, "please adjust the size "
  4370. "of cdu_context(%ld)\n",
  4371. (long)sizeof(union cdu_context));
  4372. bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
  4373. val = (4 << 24) + (0 << 12) + 1024;
  4374. REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
  4375. bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
  4376. REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
  4377. /* enable context validation interrupt from CFC */
  4378. REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
  4379. /* set the thresholds to prevent CFC/CDU race */
  4380. REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
  4381. bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
  4382. if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
  4383. REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
  4384. bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
  4385. bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
  4386. bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
  4387. /* Reset PCIE errors for debug */
  4388. REG_WR(bp, 0x2814, 0xffffffff);
  4389. REG_WR(bp, 0x3820, 0xffffffff);
  4390. if (CHIP_IS_E2(bp)) {
  4391. REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
  4392. (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
  4393. PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
  4394. REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
  4395. (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
  4396. PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
  4397. PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
  4398. REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
  4399. (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
  4400. PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
  4401. PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
  4402. }
  4403. bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
  4404. bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
  4405. bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
  4406. bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
  4407. bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
  4408. if (!CHIP_IS_E1(bp)) {
  4409. REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
  4410. REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
  4411. }
  4412. if (CHIP_IS_E2(bp)) {
  4413. /* Bit-map indicating which L2 hdrs may appear after the
  4414. basic Ethernet header */
  4415. REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
  4416. }
  4417. if (CHIP_REV_IS_SLOW(bp))
  4418. msleep(200);
  4419. /* finish CFC init */
  4420. val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
  4421. if (val != 1) {
  4422. BNX2X_ERR("CFC LL_INIT failed\n");
  4423. return -EBUSY;
  4424. }
  4425. val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
  4426. if (val != 1) {
  4427. BNX2X_ERR("CFC AC_INIT failed\n");
  4428. return -EBUSY;
  4429. }
  4430. val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
  4431. if (val != 1) {
  4432. BNX2X_ERR("CFC CAM_INIT failed\n");
  4433. return -EBUSY;
  4434. }
  4435. REG_WR(bp, CFC_REG_DEBUG0, 0);
  4436. if (CHIP_IS_E1(bp)) {
  4437. /* read NIG statistic
  4438. to see if this is our first up since powerup */
  4439. bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
  4440. val = *bnx2x_sp(bp, wb_data[0]);
  4441. /* do internal memory self test */
  4442. if ((val == 0) && bnx2x_int_mem_test(bp)) {
  4443. BNX2X_ERR("internal mem self test failed\n");
  4444. return -EBUSY;
  4445. }
  4446. }
  4447. bnx2x_setup_fan_failure_detection(bp);
  4448. /* clear PXP2 attentions */
  4449. REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
  4450. bnx2x_enable_blocks_attention(bp);
  4451. if (CHIP_PARITY_ENABLED(bp))
  4452. bnx2x_enable_blocks_parity(bp);
  4453. if (!BP_NOMCP(bp)) {
  4454. /* In E2 2-PORT mode, same ext phy is used for the two paths */
  4455. if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
  4456. CHIP_IS_E1x(bp)) {
  4457. u32 shmem_base[2], shmem2_base[2];
  4458. shmem_base[0] = bp->common.shmem_base;
  4459. shmem2_base[0] = bp->common.shmem2_base;
  4460. if (CHIP_IS_E2(bp)) {
  4461. shmem_base[1] =
  4462. SHMEM2_RD(bp, other_shmem_base_addr);
  4463. shmem2_base[1] =
  4464. SHMEM2_RD(bp, other_shmem2_base_addr);
  4465. }
  4466. bnx2x_acquire_phy_lock(bp);
  4467. bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
  4468. bp->common.chip_id);
  4469. bnx2x_release_phy_lock(bp);
  4470. }
  4471. } else
  4472. BNX2X_ERR("Bootcode is missing - can not initialize link\n");
  4473. return 0;
  4474. }
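/*
 * Per-port HW init: run the PORT0/PORT1 init stages, program the
 * BRB pause thresholds according to MTU and port count, and set up
 * the per-port NIG classification and AEU attention masks.
 */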
  4475. static int bnx2x_init_hw_port(struct bnx2x *bp)
  4476. {
  4477. int port = BP_PORT(bp);
  4478. int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
  4479. u32 low, high;
  4480. u32 val;
  4481. DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
  4482. REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
  4483. bnx2x_init_block(bp, PXP_BLOCK, init_stage);
  4484. bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4485. /* Timers bug workaround: the common phase disables the pf_master bit
4486. * in pglue; we need to enable it here before any DMAE accesses are
4487. * attempted. Therefore we manually added the enable-master to the
  4488. * port phase (it also happens in the function phase)
  4489. */
  4490. if (CHIP_IS_E2(bp))
  4491. REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
  4492. bnx2x_init_block(bp, TCM_BLOCK, init_stage);
  4493. bnx2x_init_block(bp, UCM_BLOCK, init_stage);
  4494. bnx2x_init_block(bp, CCM_BLOCK, init_stage);
  4495. bnx2x_init_block(bp, XCM_BLOCK, init_stage);
  4496. /* QM cid (connection) count */
  4497. bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
  4498. #ifdef BCM_CNIC
  4499. bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
  4500. REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
  4501. REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
  4502. #endif
  4503. bnx2x_init_block(bp, DQ_BLOCK, init_stage);
  4504. if (CHIP_MODE_IS_4_PORT(bp))
  4505. bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
  4506. if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
  4507. bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
  4508. if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
  4509. /* no pause for emulation and FPGA */
  4510. low = 0;
  4511. high = 513;
  4512. } else {
  4513. if (IS_MF(bp))
  4514. low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
  4515. else if (bp->dev->mtu > 4096) {
  4516. if (bp->flags & ONE_PORT_FLAG)
  4517. low = 160;
  4518. else {
  4519. val = bp->dev->mtu;
4520. /* low = (24*1024 + val*4)/256, rounded up */
  4521. low = 96 + (val/64) +
  4522. ((val % 64) ? 1 : 0);
  4523. }
  4524. } else
  4525. low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
  4526. high = low + 56; /* 14*1024/256 */
  4527. }
  4528. REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
  4529. REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
  4530. }
  4531. if (CHIP_MODE_IS_4_PORT(bp)) {
  4532. REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
  4533. REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
  4534. REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
  4535. BRB1_REG_MAC_GUARANTIED_0), 40);
  4536. }
  4537. bnx2x_init_block(bp, PRS_BLOCK, init_stage);
  4538. bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
  4539. bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
  4540. bnx2x_init_block(bp, USDM_BLOCK, init_stage);
  4541. bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
  4542. bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
  4543. bnx2x_init_block(bp, USEM_BLOCK, init_stage);
  4544. bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
  4545. bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
  4546. if (CHIP_MODE_IS_4_PORT(bp))
  4547. bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
  4548. bnx2x_init_block(bp, UPB_BLOCK, init_stage);
  4549. bnx2x_init_block(bp, XPB_BLOCK, init_stage);
  4550. bnx2x_init_block(bp, PBF_BLOCK, init_stage);
  4551. if (!CHIP_IS_E2(bp)) {
  4552. /* configure PBF to work without PAUSE mtu 9000 */
  4553. REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
  4554. /* update threshold */
  4555. REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
  4556. /* update init credit */
  4557. REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
  4558. /* probe changes */
  4559. REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
  4560. udelay(50);
  4561. REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
  4562. }
  4563. #ifdef BCM_CNIC
  4564. bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
  4565. #endif
  4566. bnx2x_init_block(bp, CDU_BLOCK, init_stage);
  4567. bnx2x_init_block(bp, CFC_BLOCK, init_stage);
  4568. if (CHIP_IS_E1(bp)) {
  4569. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  4570. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  4571. }
  4572. bnx2x_init_block(bp, HC_BLOCK, init_stage);
  4573. bnx2x_init_block(bp, IGU_BLOCK, init_stage);
  4574. bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
  4575. /* init aeu_mask_attn_func_0/1:
  4576. * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
  4577. * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
  4578. * bits 4-7 are used for "per vn group attention" */
  4579. val = IS_MF(bp) ? 0xF7 : 0x7;
  4580. /* Enable DCBX attention for all but E1 */
  4581. val |= CHIP_IS_E1(bp) ? 0 : 0x10;
  4582. REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
  4583. bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
  4584. bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
  4585. bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
  4586. bnx2x_init_block(bp, DBU_BLOCK, init_stage);
  4587. bnx2x_init_block(bp, DBG_BLOCK, init_stage);
  4588. bnx2x_init_block(bp, NIG_BLOCK, init_stage);
  4589. REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
  4590. if (!CHIP_IS_E1(bp)) {
  4591. /* 0x2 disable mf_ov, 0x1 enable */
  4592. REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
  4593. (IS_MF_SD(bp) ? 0x1 : 0x2));
  4594. if (CHIP_IS_E2(bp)) {
  4595. val = 0;
  4596. switch (bp->mf_mode) {
  4597. case MULTI_FUNCTION_SD:
  4598. val = 1;
  4599. break;
  4600. case MULTI_FUNCTION_SI:
  4601. val = 2;
  4602. break;
  4603. }
  4604. REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
  4605. NIG_REG_LLH0_CLS_TYPE), val);
  4606. }
  4607. {
  4608. REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
  4609. REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
  4610. REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
  4611. }
  4612. }
  4613. bnx2x_init_block(bp, MCP_BLOCK, init_stage);
  4614. bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
  4615. if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
  4616. bp->common.shmem2_base, port)) {
  4617. u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
  4618. MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
  4619. val = REG_RD(bp, reg_addr);
  4620. val |= AEU_INPUTS_ATTN_BITS_SPIO5;
  4621. REG_WR(bp, reg_addr, val);
  4622. }
  4623. bnx2x__link_reset(bp);
  4624. return 0;
  4625. }
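/*
 * Write one ILT (on-chip address translation) entry as a wide-bus
 * 64-bit write of the ONCHIP_ADDR1/2 halves.
 */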
  4626. static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
  4627. {
  4628. int reg;
  4629. if (CHIP_IS_E1(bp))
  4630. reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
  4631. else
  4632. reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
  4633. bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
  4634. }
  4635. static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
  4636. {
  4637. bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
  4638. }
  4639. static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
  4640. {
  4641. u32 i, base = FUNC_ILT_BASE(func);
  4642. for (i = base; i < base + ILT_PER_FUNC; i++)
  4643. bnx2x_ilt_wr(bp, i, 0);
  4644. }
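/*
 * Per-function HW init: program the CDU ILT lines for this
 * function's context memory, run the FUNC init stages, and
 * initialize the interrupt controller (HC or IGU) producers for
 * the function's status blocks.
 */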
  4645. static int bnx2x_init_hw_func(struct bnx2x *bp)
  4646. {
  4647. int port = BP_PORT(bp);
  4648. int func = BP_FUNC(bp);
  4649. struct bnx2x_ilt *ilt = BP_ILT(bp);
  4650. u16 cdu_ilt_start;
  4651. u32 addr, val;
  4652. u32 main_mem_base, main_mem_size, main_mem_prty_clr;
  4653. int i, main_mem_width;
  4654. DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
  4655. /* set MSI reconfigure capability */
  4656. if (bp->common.int_block == INT_BLOCK_HC) {
  4657. addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
  4658. val = REG_RD(bp, addr);
  4659. val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
  4660. REG_WR(bp, addr, val);
  4661. }
  4662. ilt = BP_ILT(bp);
  4663. cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
  4664. for (i = 0; i < L2_ILT_LINES(bp); i++) {
  4665. ilt->lines[cdu_ilt_start + i].page =
  4666. bp->context.vcxt + (ILT_PAGE_CIDS * i);
  4667. ilt->lines[cdu_ilt_start + i].page_mapping =
  4668. bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
  4669. /* cdu ilt pages are allocated manually so there's no need to
  4670. set the size */
  4671. }
  4672. bnx2x_ilt_init_op(bp, INITOP_SET);
  4673. #ifdef BCM_CNIC
  4674. bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
  4675. /* T1 hash bits value determines the T1 number of entries */
  4676. REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
  4677. #endif
  4678. #ifndef BCM_CNIC
  4679. /* set NIC mode */
  4680. REG_WR(bp, PRS_REG_NIC_MODE, 1);
  4681. #endif /* BCM_CNIC */
  4682. if (CHIP_IS_E2(bp)) {
  4683. u32 pf_conf = IGU_PF_CONF_FUNC_EN;
  4684. /* Turn on a single ISR mode in IGU if driver is going to use
  4685. * INT#x or MSI
  4686. */
  4687. if (!(bp->flags & USING_MSIX_FLAG))
  4688. pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
  4689. /*
4690. * Timers bug workaround: function init part.
4691. * We need to wait 20 msec after initializing the ILT to make sure
4692. * there are no requests left in any of the PXP internal queues
4693. * with "old" ILT addresses
  4694. */
  4695. msleep(20);
  4696. /*
4697. * Master enable - must be set again here because WB DMAE writes
4698. * are performed before this register is re-initialized as part
4699. * of the regular function init
  4700. */
  4701. REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
  4702. /* Enable the function in IGU */
  4703. REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
  4704. }
  4705. bp->dmae_ready = 1;
  4706. bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
  4707. if (CHIP_IS_E2(bp))
  4708. REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
  4709. bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
  4710. bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
  4711. bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
  4712. bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
  4713. bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
  4714. bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
  4715. bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
  4716. bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
  4717. bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
  4718. if (CHIP_IS_E2(bp)) {
  4719. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
  4720. BP_PATH(bp));
  4721. REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
  4722. BP_PATH(bp));
  4723. }
  4724. if (CHIP_MODE_IS_4_PORT(bp))
  4725. bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
  4726. if (CHIP_IS_E2(bp))
  4727. REG_WR(bp, QM_REG_PF_EN, 1);
  4728. bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
  4729. if (CHIP_MODE_IS_4_PORT(bp))
  4730. bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
  4731. bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
  4732. bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
  4733. bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
  4734. bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
  4735. bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
  4736. bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
  4737. bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
  4738. bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
  4739. bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
  4740. bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
  4741. bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
  4742. if (CHIP_IS_E2(bp))
  4743. REG_WR(bp, PBF_REG_DISABLE_PF, 0);
  4744. bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
  4745. bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
  4746. if (CHIP_IS_E2(bp))
  4747. REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
  4748. if (IS_MF(bp)) {
  4749. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
  4750. REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
  4751. }
  4752. bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
  4753. /* HC init per function */
  4754. if (bp->common.int_block == INT_BLOCK_HC) {
  4755. if (CHIP_IS_E1H(bp)) {
  4756. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  4757. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  4758. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  4759. }
  4760. bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
  4761. } else {
  4762. int num_segs, sb_idx, prod_offset;
  4763. REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
  4764. if (CHIP_IS_E2(bp)) {
  4765. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
  4766. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
  4767. }
  4768. bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
  4769. if (CHIP_IS_E2(bp)) {
  4770. int dsb_idx = 0;
  4771. /**
  4772. * Producer memory:
  4773. * E2 mode: address 0-135 match to the mapping memory;
  4774. * 136 - PF0 default prod; 137 - PF1 default prod;
  4775. * 138 - PF2 default prod; 139 - PF3 default prod;
  4776. * 140 - PF0 attn prod; 141 - PF1 attn prod;
  4777. * 142 - PF2 attn prod; 143 - PF3 attn prod;
  4778. * 144-147 reserved.
  4779. *
4780. * E1.5 mode - in backward compatible mode:
4781. * for a non-default SB, each even line in the memory
4782. * holds the U producer and each odd line holds
  4783. * the C producer. The first 128 producers are for
  4784. * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
  4785. * producers are for the DSB for each PF.
  4786. * Each PF has five segments: (the order inside each
  4787. * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
  4788. * 132-135 C prods; 136-139 X prods; 140-143 T prods;
  4789. * 144-147 attn prods;
  4790. */
  4791. /* non-default-status-blocks */
  4792. num_segs = CHIP_INT_MODE_IS_BC(bp) ?
  4793. IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
  4794. for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
  4795. prod_offset = (bp->igu_base_sb + sb_idx) *
  4796. num_segs;
  4797. for (i = 0; i < num_segs; i++) {
  4798. addr = IGU_REG_PROD_CONS_MEMORY +
  4799. (prod_offset + i) * 4;
  4800. REG_WR(bp, addr, 0);
  4801. }
  4802. /* send consumer update with value 0 */
  4803. bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
  4804. USTORM_ID, 0, IGU_INT_NOP, 1);
  4805. bnx2x_igu_clear_sb(bp,
  4806. bp->igu_base_sb + sb_idx);
  4807. }
  4808. /* default-status-blocks */
  4809. num_segs = CHIP_INT_MODE_IS_BC(bp) ?
  4810. IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
  4811. if (CHIP_MODE_IS_4_PORT(bp))
  4812. dsb_idx = BP_FUNC(bp);
  4813. else
  4814. dsb_idx = BP_E1HVN(bp);
  4815. prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
  4816. IGU_BC_BASE_DSB_PROD + dsb_idx :
  4817. IGU_NORM_BASE_DSB_PROD + dsb_idx);
  4818. for (i = 0; i < (num_segs * E1HVN_MAX);
  4819. i += E1HVN_MAX) {
  4820. addr = IGU_REG_PROD_CONS_MEMORY +
  4821. (prod_offset + i)*4;
  4822. REG_WR(bp, addr, 0);
  4823. }
  4824. /* send consumer update with 0 */
  4825. if (CHIP_INT_MODE_IS_BC(bp)) {
  4826. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4827. USTORM_ID, 0, IGU_INT_NOP, 1);
  4828. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4829. CSTORM_ID, 0, IGU_INT_NOP, 1);
  4830. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4831. XSTORM_ID, 0, IGU_INT_NOP, 1);
  4832. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4833. TSTORM_ID, 0, IGU_INT_NOP, 1);
  4834. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4835. ATTENTION_ID, 0, IGU_INT_NOP, 1);
  4836. } else {
  4837. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4838. USTORM_ID, 0, IGU_INT_NOP, 1);
  4839. bnx2x_ack_sb(bp, bp->igu_dsb_id,
  4840. ATTENTION_ID, 0, IGU_INT_NOP, 1);
  4841. }
  4842. bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
  4843. /* !!! these should become driver const once
  4844. rf-tool supports split-68 const */
  4845. REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
  4846. REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
  4847. REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
  4848. REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
  4849. REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
  4850. REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
  4851. }
  4852. }
  4853. /* Reset PCIE errors for debug */
  4854. REG_WR(bp, 0x2114, 0xffffffff);
  4855. REG_WR(bp, 0x2120, 0xffffffff);
  4856. bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
  4857. bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
  4858. bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
  4859. bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
  4860. bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
  4861. bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
  4862. if (CHIP_IS_E1x(bp)) {
  4863. main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
  4864. main_mem_base = HC_REG_MAIN_MEMORY +
  4865. BP_PORT(bp) * (main_mem_size * 4);
  4866. main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
  4867. main_mem_width = 8;
  4868. val = REG_RD(bp, main_mem_prty_clr);
  4869. if (val)
  4870. DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
  4871. "block during "
  4872. "function init (0x%x)!\n", val);
  4873. /* Clear "false" parity errors in MSI-X table */
  4874. for (i = main_mem_base;
  4875. i < main_mem_base + main_mem_size * 4;
  4876. i += main_mem_width) {
  4877. bnx2x_read_dmae(bp, i, main_mem_width / 4);
  4878. bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
  4879. i, main_mem_width / 4);
  4880. }
  4881. /* Clear HC parity attention */
  4882. REG_RD(bp, main_mem_prty_clr);
  4883. }
  4884. bnx2x_phy_probe(&bp->link_params);
  4885. return 0;
  4886. }
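/* Run the HW init stages requested by the MCP load code. The switch below
* falls through on purpose (note the "no break" markers): a COMMON load
* code runs the common, port and function stages, a PORT load code runs the
* port and function stages, and a FUNCTION load code runs only the
* per-function stage.
*/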
  4887. int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
  4888. {
  4889. int rc = 0;
  4890. DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
  4891. BP_ABS_FUNC(bp), load_code);
  4892. bp->dmae_ready = 0;
  4893. spin_lock_init(&bp->dmae_lock);
  4894. switch (load_code) {
  4895. case FW_MSG_CODE_DRV_LOAD_COMMON:
  4896. case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
  4897. rc = bnx2x_init_hw_common(bp, load_code);
  4898. if (rc)
  4899. goto init_hw_err;
  4900. /* no break */
  4901. case FW_MSG_CODE_DRV_LOAD_PORT:
  4902. rc = bnx2x_init_hw_port(bp);
  4903. if (rc)
  4904. goto init_hw_err;
  4905. /* no break */
  4906. case FW_MSG_CODE_DRV_LOAD_FUNCTION:
  4907. rc = bnx2x_init_hw_func(bp);
  4908. if (rc)
  4909. goto init_hw_err;
  4910. break;
  4911. default:
  4912. BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
  4913. break;
  4914. }
  4915. if (!BP_NOMCP(bp)) {
  4916. int mb_idx = BP_FW_MB_IDX(bp);
  4917. bp->fw_drv_pulse_wr_seq =
  4918. (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
  4919. DRV_PULSE_SEQ_MASK);
  4920. DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
  4921. }
  4922. init_hw_err:
  4923. bnx2x_gunzip_end(bp);
  4924. return rc;
  4925. }
  4926. void bnx2x_free_mem(struct bnx2x *bp)
  4927. {
  4928. bnx2x_gunzip_end(bp);
  4929. /* fastpath */
  4930. bnx2x_free_fp_mem(bp);
  4931. /* end of fastpath */
  4932. BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
  4933. sizeof(struct host_sp_status_block));
  4934. BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
  4935. sizeof(struct bnx2x_slowpath));
  4936. BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
  4937. bp->context.size);
  4938. bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
  4939. BNX2X_FREE(bp->ilt->lines);
  4940. #ifdef BCM_CNIC
  4941. if (CHIP_IS_E2(bp))
  4942. BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
  4943. sizeof(struct host_hc_status_block_e2));
  4944. else
  4945. BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
  4946. sizeof(struct host_hc_status_block_e1x));
  4947. BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
  4948. #endif
  4949. BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
  4950. BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
  4951. BCM_PAGE_SIZE * NUM_EQ_PAGES);
  4952. BNX2X_FREE(bp->rx_indir_table);
  4953. }
  4954. int bnx2x_alloc_mem(struct bnx2x *bp)
  4955. {
  4956. if (bnx2x_gunzip_init(bp))
  4957. return -ENOMEM;
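/* Note: the BNX2X_PCI_ALLOC()/BNX2X_ALLOC() helpers below are presumed to
* bail out to the alloc_mem_err label on allocation failure, where
* bnx2x_free_mem() releases whatever was already allocated.
*/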
  4958. #ifdef BCM_CNIC
  4959. if (CHIP_IS_E2(bp))
  4960. BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
  4961. sizeof(struct host_hc_status_block_e2));
  4962. else
  4963. BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
  4964. sizeof(struct host_hc_status_block_e1x));
  4965. /* allocate searcher T2 table */
  4966. BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
  4967. #endif
  4968. BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
  4969. sizeof(struct host_sp_status_block));
  4970. BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
  4971. sizeof(struct bnx2x_slowpath));
  4972. bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
  4973. BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
  4974. bp->context.size);
  4975. BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
  4976. if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
  4977. goto alloc_mem_err;
  4978. /* Slow path ring */
  4979. BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
  4980. /* EQ */
  4981. BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
  4982. BCM_PAGE_SIZE * NUM_EQ_PAGES);
  4983. BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
  4984. TSTORM_INDIRECTION_TABLE_SIZE);
  4985. /* fastpath */
4986. /* needs to be done at the end, since it's self-adjusting to the amount
  4987. * of memory available for RSS queues
  4988. */
  4989. if (bnx2x_alloc_fp_mem(bp))
  4990. goto alloc_mem_err;
  4991. return 0;
  4992. alloc_mem_err:
  4993. bnx2x_free_mem(bp);
  4994. return -ENOMEM;
  4995. }
  4996. /*
  4997. * Init service functions
  4998. */
  4999. static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
  5000. int *state_p, int flags);
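/* bnx2x_func_start()/bnx2x_func_stop() post the common FUNCTION_START/STOP
* ramrods and synchronously wait for bp->state to reach FUNC_STARTED or
* CLOSING_WAIT4_UNLOAD respectively.
*/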
  5001. int bnx2x_func_start(struct bnx2x *bp)
  5002. {
  5003. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
  5004. /* Wait for completion */
  5005. return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
  5006. WAIT_RAMROD_COMMON);
  5007. }
  5008. static int bnx2x_func_stop(struct bnx2x *bp)
  5009. {
  5010. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
  5011. /* Wait for completion */
  5012. return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
  5013. 0, &(bp->state), WAIT_RAMROD_COMMON);
  5014. }
  5015. /**
  5016. * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
  5017. *
  5018. * @bp: driver handle
  5019. * @set: set or clear an entry (1 or 0)
  5020. * @mac: pointer to a buffer containing a MAC
  5021. * @cl_bit_vec: bit vector of clients to register a MAC for
  5022. * @cam_offset: offset in a CAM to use
  5023. * @is_bcast: is the set MAC a broadcast address (for E1 only)
  5024. */
  5025. static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
  5026. u32 cl_bit_vec, u8 cam_offset,
  5027. u8 is_bcast)
  5028. {
  5029. struct mac_configuration_cmd *config =
  5030. (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
  5031. int ramrod_flags = WAIT_RAMROD_COMMON;
  5032. bp->set_mac_pending = 1;
  5033. config->hdr.length = 1;
  5034. config->hdr.offset = cam_offset;
  5035. config->hdr.client_id = 0xff;
5036. /* Mark this as a single MAC configuration ramrod (as opposed to a
5037. * UC/MC list configuration).
  5038. */
  5039. config->hdr.echo = 1;
  5040. /* primary MAC */
  5041. config->config_table[0].msb_mac_addr =
  5042. swab16(*(u16 *)&mac[0]);
  5043. config->config_table[0].middle_mac_addr =
  5044. swab16(*(u16 *)&mac[2]);
  5045. config->config_table[0].lsb_mac_addr =
  5046. swab16(*(u16 *)&mac[4]);
  5047. config->config_table[0].clients_bit_vector =
  5048. cpu_to_le32(cl_bit_vec);
  5049. config->config_table[0].vlan_id = 0;
  5050. config->config_table[0].pf_id = BP_FUNC(bp);
  5051. if (set)
  5052. SET_FLAG(config->config_table[0].flags,
  5053. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5054. T_ETH_MAC_COMMAND_SET);
  5055. else
  5056. SET_FLAG(config->config_table[0].flags,
  5057. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5058. T_ETH_MAC_COMMAND_INVALIDATE);
  5059. if (is_bcast)
  5060. SET_FLAG(config->config_table[0].flags,
  5061. MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
  5062. DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
  5063. (set ? "setting" : "clearing"),
  5064. config->config_table[0].msb_mac_addr,
  5065. config->config_table[0].middle_mac_addr,
  5066. config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
  5067. mb();
  5068. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  5069. U64_HI(bnx2x_sp_mapping(bp, mac_config)),
  5070. U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
  5071. /* Wait for a completion */
  5072. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
  5073. }
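/* Poll *state_p until it reaches the requested state. With WAIT_RAMROD_POLL
* completions are reaped explicitly (the EQ for common ramrods, the RX ring
* otherwise); without it we just sleep and rely on the interrupt path to
* update the state. Returns 0 on success, -EIO on panic and -EBUSY on
* timeout (roughly 5 seconds).
*/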
  5074. static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
  5075. int *state_p, int flags)
  5076. {
  5077. /* can take a while if any port is running */
  5078. int cnt = 5000;
  5079. u8 poll = flags & WAIT_RAMROD_POLL;
  5080. u8 common = flags & WAIT_RAMROD_COMMON;
  5081. DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
  5082. poll ? "polling" : "waiting", state, idx);
  5083. might_sleep();
  5084. while (cnt--) {
  5085. if (poll) {
  5086. if (common)
  5087. bnx2x_eq_int(bp);
  5088. else {
  5089. bnx2x_rx_int(bp->fp, 10);
  5090. /* if index is different from 0
  5091. * the reply for some commands will
  5092. * be on the non default queue
  5093. */
  5094. if (idx)
  5095. bnx2x_rx_int(&bp->fp[idx], 10);
  5096. }
  5097. }
  5098. mb(); /* state is changed by bnx2x_sp_event() */
  5099. if (*state_p == state) {
  5100. #ifdef BNX2X_STOP_ON_ERROR
  5101. DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
  5102. #endif
  5103. return 0;
  5104. }
  5105. msleep(1);
  5106. if (bp->panic)
  5107. return -EIO;
  5108. }
  5109. /* timeout! */
  5110. BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
  5111. poll ? "polling" : "waiting", state, idx);
  5112. #ifdef BNX2X_STOP_ON_ERROR
  5113. bnx2x_panic();
  5114. #endif
  5115. return -EBUSY;
  5116. }
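/* Each CAM entry type occupies a block of FUNC_MAX consecutive lines:
* rel_offset selects the block and the function number (or the VN for
* non-4-port E2) selects the line within it, so every PF gets a private
* line per entry type.
*/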
  5117. static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
  5118. {
  5119. if (CHIP_IS_E1H(bp))
  5120. return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
  5121. else if (CHIP_MODE_IS_4_PORT(bp))
  5122. return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
  5123. else
  5124. return E2_FUNC_MAX * rel_offset + BP_VN(bp);
  5125. }
  5126. /**
  5127. * LLH CAM line allocations: currently only iSCSI and ETH macs are
  5128. * relevant. In addition, current implementation is tuned for a
  5129. * single ETH MAC.
  5130. */
  5131. enum {
  5132. LLH_CAM_ISCSI_ETH_LINE = 0,
  5133. LLH_CAM_ETH_LINE,
  5134. LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
  5135. };
  5136. static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
  5137. int set,
  5138. unsigned char *dev_addr,
  5139. int index)
  5140. {
  5141. u32 wb_data[2];
  5142. u32 mem_offset, ena_offset, mem_index;
  5143. /**
5144. * index mapping:
  5145. * 0..7 - goes to MEM
  5146. * 8..15 - goes to MEM2
  5147. */
  5148. if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
  5149. return;
  5150. /* calculate memory start offset according to the mapping
  5151. * and index in the memory */
  5152. if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
  5153. mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
  5154. NIG_REG_LLH0_FUNC_MEM;
  5155. ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
  5156. NIG_REG_LLH0_FUNC_MEM_ENABLE;
  5157. mem_index = index;
  5158. } else {
  5159. mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
  5160. NIG_REG_P0_LLH_FUNC_MEM2;
  5161. ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
  5162. NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
  5163. mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
  5164. }
  5165. if (set) {
  5166. /* LLH_FUNC_MEM is a u64 WB register */
  5167. mem_offset += 8*mem_index;
  5168. wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
  5169. (dev_addr[4] << 8) | dev_addr[5]);
  5170. wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
  5171. REG_WR_DMAE(bp, mem_offset, wb_data, 2);
  5172. }
  5173. /* enable/disable the entry */
  5174. REG_WR(bp, ena_offset + 4*mem_index, set);
  5175. }
  5176. void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
  5177. {
  5178. u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
  5179. bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
  5180. /* networking MAC */
  5181. bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
5182. (1 << bp->fp->cl_id), cam_offset, 0);
  5183. bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
  5184. if (CHIP_IS_E1(bp)) {
  5185. /* broadcast MAC */
  5186. static const u8 bcast[ETH_ALEN] = {
  5187. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
  5188. };
  5189. bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
  5190. }
  5191. }
  5192. static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
  5193. {
  5194. return CHIP_REV_IS_SLOW(bp) ?
  5195. (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
  5196. (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
  5197. }
5198. /* set mc list; do not wait, as waiting implies sleeping and
5199. * set_rx_mode can be invoked from a non-sleepable context.
  5200. *
  5201. * Instead we use the same ramrod data buffer each time we need
  5202. * to configure a list of addresses, and use the fact that the
  5203. * list of MACs is changed in an incremental way and that the
  5204. * function is called under the netif_addr_lock. A temporary
  5205. * inconsistent CAM configuration (possible in case of a very fast
  5206. * sequence of add/del/add on the host side) will shortly be
  5207. * restored by the handler of the last ramrod.
  5208. */
  5209. static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
  5210. {
  5211. int i = 0, old;
  5212. struct net_device *dev = bp->dev;
  5213. u8 offset = bnx2x_e1_cam_mc_offset(bp);
  5214. struct netdev_hw_addr *ha;
  5215. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
  5216. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
  5217. if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
  5218. return -EINVAL;
  5219. netdev_for_each_mc_addr(ha, dev) {
  5220. /* copy mac */
  5221. config_cmd->config_table[i].msb_mac_addr =
  5222. swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
  5223. config_cmd->config_table[i].middle_mac_addr =
  5224. swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
  5225. config_cmd->config_table[i].lsb_mac_addr =
  5226. swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
  5227. config_cmd->config_table[i].vlan_id = 0;
  5228. config_cmd->config_table[i].pf_id = BP_FUNC(bp);
  5229. config_cmd->config_table[i].clients_bit_vector =
  5230. cpu_to_le32(1 << BP_L_ID(bp));
  5231. SET_FLAG(config_cmd->config_table[i].flags,
  5232. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5233. T_ETH_MAC_COMMAND_SET);
  5234. DP(NETIF_MSG_IFUP,
  5235. "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
  5236. config_cmd->config_table[i].msb_mac_addr,
  5237. config_cmd->config_table[i].middle_mac_addr,
  5238. config_cmd->config_table[i].lsb_mac_addr);
  5239. i++;
  5240. }
  5241. old = config_cmd->hdr.length;
  5242. if (old > i) {
  5243. for (; i < old; i++) {
  5244. if (CAM_IS_INVALID(config_cmd->
  5245. config_table[i])) {
  5246. /* already invalidated */
  5247. break;
  5248. }
  5249. /* invalidate */
  5250. SET_FLAG(config_cmd->config_table[i].flags,
  5251. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5252. T_ETH_MAC_COMMAND_INVALIDATE);
  5253. }
  5254. }
  5255. wmb();
  5256. config_cmd->hdr.length = i;
  5257. config_cmd->hdr.offset = offset;
  5258. config_cmd->hdr.client_id = 0xff;
  5259. /* Mark that this ramrod doesn't use bp->set_mac_pending for
  5260. * synchronization.
  5261. */
  5262. config_cmd->hdr.echo = 0;
  5263. mb();
  5264. return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  5265. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  5266. }
  5267. void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
  5268. {
  5269. int i;
  5270. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
  5271. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
  5272. int ramrod_flags = WAIT_RAMROD_COMMON;
  5273. u8 offset = bnx2x_e1_cam_mc_offset(bp);
  5274. for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
  5275. SET_FLAG(config_cmd->config_table[i].flags,
  5276. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  5277. T_ETH_MAC_COMMAND_INVALIDATE);
  5278. wmb();
  5279. config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
  5280. config_cmd->hdr.offset = offset;
  5281. config_cmd->hdr.client_id = 0xff;
  5282. /* We'll wait for a completion this time... */
  5283. config_cmd->hdr.echo = 1;
  5284. bp->set_mac_pending = 1;
  5285. mb();
  5286. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  5287. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  5288. /* Wait for a completion */
  5289. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
  5290. ramrod_flags);
  5291. }
  5292. /* Accept one or more multicasts */
  5293. static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
  5294. {
  5295. struct net_device *dev = bp->dev;
  5296. struct netdev_hw_addr *ha;
  5297. u32 mc_filter[MC_HASH_SIZE];
  5298. u32 crc, bit, regidx;
  5299. int i;
  5300. memset(mc_filter, 0, 4 * MC_HASH_SIZE);
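/* Hash each multicast address into the MC filter: take the top byte of the
* crc32c of the MAC, use its upper 3 bits to select one of the MC_HASH_SIZE
* registers and its lower 5 bits to select the bit within that register.
*/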
  5301. netdev_for_each_mc_addr(ha, dev) {
  5302. DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
  5303. bnx2x_mc_addr(ha));
  5304. crc = crc32c_le(0, bnx2x_mc_addr(ha),
  5305. ETH_ALEN);
  5306. bit = (crc >> 24) & 0xff;
  5307. regidx = bit >> 5;
  5308. bit &= 0x1f;
  5309. mc_filter[regidx] |= (1 << bit);
  5310. }
  5311. for (i = 0; i < MC_HASH_SIZE; i++)
  5312. REG_WR(bp, MC_HASH_OFFSET(bp, i),
  5313. mc_filter[i]);
  5314. return 0;
  5315. }
  5316. void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
  5317. {
  5318. int i;
  5319. for (i = 0; i < MC_HASH_SIZE; i++)
  5320. REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
  5321. }
  5322. #ifdef BCM_CNIC
  5323. /**
  5324. * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
  5325. *
  5326. * @bp: driver handle
  5327. * @set: set or clear the CAM entry
  5328. *
5329. * This function will wait until the ramrod completion returns.
5330. * Returns 0 on success, -ENODEV if the ramrod doesn't return.
  5331. */
  5332. static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
  5333. {
  5334. u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
  5335. bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
  5336. u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
  5337. BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
  5338. u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
  5339. u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
  5340. /* Send a SET_MAC ramrod */
  5341. bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
  5342. cam_offset, 0);
  5343. bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
  5344. return 0;
  5345. }
  5346. /**
  5347. * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
  5348. *
  5349. * @bp: driver handle
  5350. * @set: set or clear the CAM entry
  5351. *
  5352. * This function will wait until the ramrod completion returns.
5353. * Returns 0 on success, -ENODEV if the ramrod doesn't return.
  5354. */
  5355. int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
  5356. {
  5357. u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
  5358. /**
  5359. * CAM allocation for E1H
  5360. * eth unicasts: by func number
  5361. * iscsi: by func number
  5362. * fip unicast: by func number
  5363. * fip multicast: by func number
  5364. */
  5365. bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
  5366. cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
  5367. return 0;
  5368. }
  5369. int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
  5370. {
  5371. u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
  5372. /**
  5373. * CAM allocation for E1H
  5374. * eth unicasts: by func number
  5375. * iscsi: by func number
  5376. * fip unicast: by func number
  5377. * fip multicast: by func number
  5378. */
  5379. bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
  5380. bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
  5381. return 0;
  5382. }
  5383. #endif
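/* Pack the prepared queue parameters into the client-init ramrod data:
* general client info, Rx ring/TPA/VLAN settings, Tx ring settings and the
* flow-control (pause) thresholds.
*/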
  5384. static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
  5385. struct bnx2x_client_init_params *params,
  5386. u8 activate,
  5387. struct client_init_ramrod_data *data)
  5388. {
  5389. /* Clear the buffer */
  5390. memset(data, 0, sizeof(*data));
  5391. /* general */
  5392. data->general.client_id = params->rxq_params.cl_id;
  5393. data->general.statistics_counter_id = params->rxq_params.stat_id;
  5394. data->general.statistics_en_flg =
  5395. (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
  5396. data->general.is_fcoe_flg =
  5397. (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
  5398. data->general.activate_flg = activate;
  5399. data->general.sp_client_id = params->rxq_params.spcl_id;
  5400. /* Rx data */
  5401. data->rx.tpa_en_flg =
  5402. (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
  5403. data->rx.vmqueue_mode_en_flg = 0;
  5404. data->rx.cache_line_alignment_log_size =
  5405. params->rxq_params.cache_line_log;
  5406. data->rx.enable_dynamic_hc =
  5407. (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
  5408. data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
  5409. data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
  5410. data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
  5411. /* We don't set drop flags */
  5412. data->rx.drop_ip_cs_err_flg = 0;
  5413. data->rx.drop_tcp_cs_err_flg = 0;
  5414. data->rx.drop_ttl0_flg = 0;
  5415. data->rx.drop_udp_cs_err_flg = 0;
  5416. data->rx.inner_vlan_removal_enable_flg =
  5417. (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
  5418. data->rx.outer_vlan_removal_enable_flg =
  5419. (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
  5420. data->rx.status_block_id = params->rxq_params.fw_sb_id;
  5421. data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
  5422. data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
  5423. data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
  5424. data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
  5425. data->rx.bd_page_base.lo =
  5426. cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
  5427. data->rx.bd_page_base.hi =
  5428. cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
  5429. data->rx.sge_page_base.lo =
  5430. cpu_to_le32(U64_LO(params->rxq_params.sge_map));
  5431. data->rx.sge_page_base.hi =
  5432. cpu_to_le32(U64_HI(params->rxq_params.sge_map));
  5433. data->rx.cqe_page_base.lo =
  5434. cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
  5435. data->rx.cqe_page_base.hi =
  5436. cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
  5437. data->rx.is_leading_rss =
  5438. (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
  5439. data->rx.is_approx_mcast = data->rx.is_leading_rss;
  5440. /* Tx data */
  5441. data->tx.enforce_security_flg = 0; /* VF specific */
  5442. data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
  5443. data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
  5444. data->tx.mtu = 0; /* VF specific */
  5445. data->tx.tx_bd_page_base.lo =
  5446. cpu_to_le32(U64_LO(params->txq_params.dscr_map));
  5447. data->tx.tx_bd_page_base.hi =
  5448. cpu_to_le32(U64_HI(params->txq_params.dscr_map));
  5449. /* flow control data */
  5450. data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
  5451. data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
  5452. data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
  5453. data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
  5454. data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
  5455. data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
  5456. data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
  5457. data->fc.safc_group_num = params->txq_params.cos;
  5458. data->fc.safc_group_en_flg =
  5459. (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
  5460. data->fc.traffic_type =
  5461. (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
  5462. LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
  5463. }
  5464. static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
  5465. {
  5466. /* ustorm cxt validation */
  5467. cxt->ustorm_ag_context.cdu_usage =
  5468. CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
  5469. ETH_CONNECTION_TYPE);
  5470. /* xcontext validation */
  5471. cxt->xstorm_ag_context.cdu_reserved =
  5472. CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
  5473. ETH_CONNECTION_TYPE);
  5474. }
  5475. static int bnx2x_setup_fw_client(struct bnx2x *bp,
  5476. struct bnx2x_client_init_params *params,
  5477. u8 activate,
  5478. struct client_init_ramrod_data *data,
  5479. dma_addr_t data_mapping)
  5480. {
  5481. u16 hc_usec;
  5482. int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
  5483. int ramrod_flags = 0, rc;
  5484. /* HC and context validation values */
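/* hc_rate is a rate in interrupts per second; 1000000 / hc_rate converts it
* to the coalescing period in microseconds (hc_usec) passed to
* bnx2x_update_coalesce_sb_index().
*/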
  5485. hc_usec = params->txq_params.hc_rate ?
  5486. 1000000 / params->txq_params.hc_rate : 0;
  5487. bnx2x_update_coalesce_sb_index(bp,
  5488. params->txq_params.fw_sb_id,
  5489. params->txq_params.sb_cq_index,
  5490. !(params->txq_params.flags & QUEUE_FLG_HC),
  5491. hc_usec);
  5492. *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
  5493. hc_usec = params->rxq_params.hc_rate ?
  5494. 1000000 / params->rxq_params.hc_rate : 0;
  5495. bnx2x_update_coalesce_sb_index(bp,
  5496. params->rxq_params.fw_sb_id,
  5497. params->rxq_params.sb_cq_index,
  5498. !(params->rxq_params.flags & QUEUE_FLG_HC),
  5499. hc_usec);
  5500. bnx2x_set_ctx_validation(params->rxq_params.cxt,
  5501. params->rxq_params.cid);
  5502. /* zero stats */
  5503. if (params->txq_params.flags & QUEUE_FLG_STATS)
  5504. storm_memset_xstats_zero(bp, BP_PORT(bp),
  5505. params->txq_params.stat_id);
  5506. if (params->rxq_params.flags & QUEUE_FLG_STATS) {
  5507. storm_memset_ustats_zero(bp, BP_PORT(bp),
  5508. params->rxq_params.stat_id);
  5509. storm_memset_tstats_zero(bp, BP_PORT(bp),
  5510. params->rxq_params.stat_id);
  5511. }
  5512. /* Fill the ramrod data */
  5513. bnx2x_fill_cl_init_data(bp, params, activate, data);
  5514. /* SETUP ramrod.
  5515. *
5516. * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
5517. * barrier other than mmiowb() is needed to impose
  5518. * proper ordering of memory operations.
  5519. */
  5520. mmiowb();
  5521. bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
  5522. U64_HI(data_mapping), U64_LO(data_mapping), 0);
  5523. /* Wait for completion */
  5524. rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
  5525. params->ramrod_params.index,
  5526. params->ramrod_params.pstate,
  5527. ramrod_flags);
  5528. return rc;
  5529. }
  5530. /**
  5531. * bnx2x_set_int_mode - configure interrupt mode
  5532. *
  5533. * @bp: driver handle
  5534. *
  5535. * In case of MSI-X it will also try to enable MSI-X.
  5536. */
  5537. static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
  5538. {
  5539. int rc = 0;
  5540. switch (bp->int_mode) {
  5541. case INT_MODE_MSI:
  5542. bnx2x_enable_msi(bp);
  5543. /* falling through... */
  5544. case INT_MODE_INTx:
  5545. bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
  5546. DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
  5547. break;
  5548. default:
  5549. /* Set number of queues according to bp->multi_mode value */
  5550. bnx2x_set_num_queues(bp);
  5551. DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
  5552. bp->num_queues);
  5553. /* if we can't use MSI-X we only need one fp,
  5554. * so try to enable MSI-X with the requested number of fp's
  5555. * and fallback to MSI or legacy INTx with one fp
  5556. */
  5557. rc = bnx2x_enable_msix(bp);
  5558. if (rc) {
  5559. /* failed to enable MSI-X */
  5560. if (bp->multi_mode)
  5561. DP(NETIF_MSG_IFUP,
  5562. "Multi requested but failed to "
  5563. "enable MSI-X (%d), "
  5564. "set number of queues to %d\n",
  5565. bp->num_queues,
  5566. 1 + NONE_ETH_CONTEXT_USE);
  5567. bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
  5568. if (!(bp->flags & DISABLE_MSI_FLAG))
  5569. bnx2x_enable_msi(bp);
  5570. }
  5571. break;
  5572. }
  5573. return rc;
  5574. }
5575. /* must be called prior to any HW initializations */
  5576. static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
  5577. {
  5578. return L2_ILT_LINES(bp);
  5579. }
  5580. void bnx2x_ilt_set_info(struct bnx2x *bp)
  5581. {
  5582. struct ilt_client_info *ilt_client;
  5583. struct bnx2x_ilt *ilt = BP_ILT(bp);
  5584. u16 line = 0;
  5585. ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
  5586. DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
  5587. /* CDU */
  5588. ilt_client = &ilt->clients[ILT_CLIENT_CDU];
  5589. ilt_client->client_num = ILT_CLIENT_CDU;
  5590. ilt_client->page_size = CDU_ILT_PAGE_SZ;
  5591. ilt_client->flags = ILT_CLIENT_SKIP_MEM;
  5592. ilt_client->start = line;
  5593. line += L2_ILT_LINES(bp);
  5594. #ifdef BCM_CNIC
  5595. line += CNIC_ILT_LINES;
  5596. #endif
  5597. ilt_client->end = line - 1;
  5598. DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
  5599. "flags 0x%x, hw psz %d\n",
  5600. ilt_client->start,
  5601. ilt_client->end,
  5602. ilt_client->page_size,
  5603. ilt_client->flags,
  5604. ilog2(ilt_client->page_size >> 12));
  5605. /* QM */
  5606. if (QM_INIT(bp->qm_cid_count)) {
  5607. ilt_client = &ilt->clients[ILT_CLIENT_QM];
  5608. ilt_client->client_num = ILT_CLIENT_QM;
  5609. ilt_client->page_size = QM_ILT_PAGE_SZ;
  5610. ilt_client->flags = 0;
  5611. ilt_client->start = line;
  5612. /* 4 bytes for each cid */
  5613. line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
  5614. QM_ILT_PAGE_SZ);
  5615. ilt_client->end = line - 1;
  5616. DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
  5617. "flags 0x%x, hw psz %d\n",
  5618. ilt_client->start,
  5619. ilt_client->end,
  5620. ilt_client->page_size,
  5621. ilt_client->flags,
  5622. ilog2(ilt_client->page_size >> 12));
  5623. }
  5624. /* SRC */
  5625. ilt_client = &ilt->clients[ILT_CLIENT_SRC];
  5626. #ifdef BCM_CNIC
  5627. ilt_client->client_num = ILT_CLIENT_SRC;
  5628. ilt_client->page_size = SRC_ILT_PAGE_SZ;
  5629. ilt_client->flags = 0;
  5630. ilt_client->start = line;
  5631. line += SRC_ILT_LINES;
  5632. ilt_client->end = line - 1;
  5633. DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
  5634. "flags 0x%x, hw psz %d\n",
  5635. ilt_client->start,
  5636. ilt_client->end,
  5637. ilt_client->page_size,
  5638. ilt_client->flags,
  5639. ilog2(ilt_client->page_size >> 12));
  5640. #else
  5641. ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
  5642. #endif
  5643. /* TM */
  5644. ilt_client = &ilt->clients[ILT_CLIENT_TM];
  5645. #ifdef BCM_CNIC
  5646. ilt_client->client_num = ILT_CLIENT_TM;
  5647. ilt_client->page_size = TM_ILT_PAGE_SZ;
  5648. ilt_client->flags = 0;
  5649. ilt_client->start = line;
  5650. line += TM_ILT_LINES;
  5651. ilt_client->end = line - 1;
  5652. DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
  5653. "flags 0x%x, hw psz %d\n",
  5654. ilt_client->start,
  5655. ilt_client->end,
  5656. ilt_client->page_size,
  5657. ilt_client->flags,
  5658. ilog2(ilt_client->page_size >> 12));
  5659. #else
  5660. ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
  5661. #endif
  5662. }
  5663. int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
  5664. int is_leading)
  5665. {
  5666. struct bnx2x_client_init_params params = { {0} };
  5667. int rc;
5668. /* reset IGU state; skip the FCoE L2 queue */
  5669. if (!IS_FCOE_FP(fp))
  5670. bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
  5671. IGU_INT_ENABLE, 0);
  5672. params.ramrod_params.pstate = &fp->state;
  5673. params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
  5674. params.ramrod_params.index = fp->index;
  5675. params.ramrod_params.cid = fp->cid;
  5676. #ifdef BCM_CNIC
  5677. if (IS_FCOE_FP(fp))
  5678. params.ramrod_params.flags |= CLIENT_IS_FCOE;
  5679. #endif
  5680. if (is_leading)
  5681. params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
  5682. bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
  5683. bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
  5684. rc = bnx2x_setup_fw_client(bp, &params, 1,
  5685. bnx2x_sp(bp, client_init_data),
  5686. bnx2x_sp_mapping(bp, client_init_data));
  5687. return rc;
  5688. }
  5689. static int bnx2x_stop_fw_client(struct bnx2x *bp,
  5690. struct bnx2x_client_ramrod_params *p)
  5691. {
  5692. int rc;
  5693. int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
  5694. /* halt the connection */
  5695. *p->pstate = BNX2X_FP_STATE_HALTING;
  5696. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
  5697. p->cl_id, 0);
  5698. /* Wait for completion */
  5699. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
  5700. p->pstate, poll_flag);
  5701. if (rc) /* timeout */
  5702. return rc;
  5703. *p->pstate = BNX2X_FP_STATE_TERMINATING;
  5704. bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
  5705. p->cl_id, 0);
  5706. /* Wait for completion */
  5707. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
  5708. p->pstate, poll_flag);
  5709. if (rc) /* timeout */
  5710. return rc;
  5711. /* delete cfc entry */
  5712. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
  5713. /* Wait for completion */
  5714. rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
  5715. p->pstate, WAIT_RAMROD_COMMON);
  5716. return rc;
  5717. }
  5718. static int bnx2x_stop_client(struct bnx2x *bp, int index)
  5719. {
  5720. struct bnx2x_client_ramrod_params client_stop = {0};
  5721. struct bnx2x_fastpath *fp = &bp->fp[index];
  5722. client_stop.index = index;
  5723. client_stop.cid = fp->cid;
  5724. client_stop.cl_id = fp->cl_id;
  5725. client_stop.pstate = &(fp->state);
  5726. client_stop.poll = 0;
  5727. return bnx2x_stop_fw_client(bp, &client_stop);
  5728. }
  5729. static void bnx2x_reset_func(struct bnx2x *bp)
  5730. {
  5731. int port = BP_PORT(bp);
  5732. int func = BP_FUNC(bp);
  5733. int i;
  5734. int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
  5735. (CHIP_IS_E2(bp) ?
  5736. offsetof(struct hc_status_block_data_e2, common) :
  5737. offsetof(struct hc_status_block_data_e1x, common));
  5738. int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
  5739. int pfid_offset = offsetof(struct pci_entity, pf_id);
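/* The offsets computed above locate the pf_id field inside the per-SB and
* SP-SB status block data in CSTORM internal memory, so the REG_WR8() calls
* below can mark this PF's status blocks as HC_FUNCTION_DISABLED.
*/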
  5740. /* Disable the function in the FW */
  5741. REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
  5742. REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
  5743. REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
  5744. REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
  5745. /* FP SBs */
  5746. for_each_eth_queue(bp, i) {
  5747. struct bnx2x_fastpath *fp = &bp->fp[i];
  5748. REG_WR8(bp,
  5749. BAR_CSTRORM_INTMEM +
  5750. CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
  5751. + pfunc_offset_fp + pfid_offset,
  5752. HC_FUNCTION_DISABLED);
  5753. }
  5754. /* SP SB */
  5755. REG_WR8(bp,
  5756. BAR_CSTRORM_INTMEM +
  5757. CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
  5758. pfunc_offset_sp + pfid_offset,
  5759. HC_FUNCTION_DISABLED);
  5760. for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
  5761. REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
  5762. 0);
  5763. /* Configure IGU */
  5764. if (bp->common.int_block == INT_BLOCK_HC) {
  5765. REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
  5766. REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
  5767. } else {
  5768. REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
  5769. REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
  5770. }
  5771. #ifdef BCM_CNIC
  5772. /* Disable Timer scan */
  5773. REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
  5774. /*
5775. * Wait for at least 10ms and up to 2 seconds for the timers scan to
  5776. * complete
  5777. */
  5778. for (i = 0; i < 200; i++) {
  5779. msleep(10);
  5780. if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
  5781. break;
  5782. }
  5783. #endif
  5784. /* Clear ILT */
  5785. bnx2x_clear_func_ilt(bp, func);
  5786. /* Timers workaround bug for E2: if this is vnic-3,
5787. * we need to set the entire ILT range for these timers.
  5788. */
  5789. if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
  5790. struct ilt_client_info ilt_cli;
  5791. /* use dummy TM client */
  5792. memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
  5793. ilt_cli.start = 0;
  5794. ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
  5795. ilt_cli.client_num = ILT_CLIENT_TM;
  5796. bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
  5797. }
5798. /* this assumes that reset_port() is called before reset_func() */
  5799. if (CHIP_IS_E2(bp))
  5800. bnx2x_pf_disable(bp);
  5801. bp->dmae_ready = 0;
  5802. }
  5803. static void bnx2x_reset_port(struct bnx2x *bp)
  5804. {
  5805. int port = BP_PORT(bp);
  5806. u32 val;
  5807. REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
  5808. /* Do not rcv packets to BRB */
  5809. REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
  5810. /* Do not direct rcv packets that are not for MCP to the BRB */
  5811. REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
  5812. NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
  5813. /* Configure AEU */
  5814. REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
  5815. msleep(100);
  5816. /* Check for BRB port occupancy */
  5817. val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
  5818. if (val)
  5819. DP(NETIF_MSG_IFDOWN,
  5820. "BRB1 is not empty %d blocks are occupied\n", val);
  5821. /* TODO: Close Doorbell port? */
  5822. }
  5823. static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
  5824. {
  5825. DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
  5826. BP_ABS_FUNC(bp), reset_code);
  5827. switch (reset_code) {
  5828. case FW_MSG_CODE_DRV_UNLOAD_COMMON:
  5829. bnx2x_reset_port(bp);
  5830. bnx2x_reset_func(bp);
  5831. bnx2x_reset_common(bp);
  5832. break;
  5833. case FW_MSG_CODE_DRV_UNLOAD_PORT:
  5834. bnx2x_reset_port(bp);
  5835. bnx2x_reset_func(bp);
  5836. break;
  5837. case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
  5838. bnx2x_reset_func(bp);
  5839. break;
  5840. default:
  5841. BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
  5842. break;
  5843. }
  5844. }
  5845. #ifdef BCM_CNIC
  5846. static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
  5847. {
  5848. if (bp->flags & FCOE_MACS_SET) {
  5849. if (!IS_MF_SD(bp))
  5850. bnx2x_set_fip_eth_mac_addr(bp, 0);
  5851. bnx2x_set_all_enode_macs(bp, 0);
  5852. bp->flags &= ~FCOE_MACS_SET;
  5853. }
  5854. }
  5855. #endif
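/* Graceful unload path: wait for Tx fastpath work to drain, remove MAC and
* multicast filters, optionally program WoL MAC entries, stop all clients
* and the function via ramrods, reset the chip according to the MCP (or
* driver-computed) reset code and finally report UNLOAD_DONE to the MCP.
*/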
  5856. void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
  5857. {
  5858. int port = BP_PORT(bp);
  5859. u32 reset_code = 0;
  5860. int i, cnt, rc;
  5861. /* Wait until tx fastpath tasks complete */
  5862. for_each_tx_queue(bp, i) {
  5863. struct bnx2x_fastpath *fp = &bp->fp[i];
  5864. cnt = 1000;
  5865. while (bnx2x_has_tx_work_unload(fp)) {
  5866. if (!cnt) {
  5867. BNX2X_ERR("timeout waiting for queue[%d]\n",
  5868. i);
  5869. #ifdef BNX2X_STOP_ON_ERROR
  5870. bnx2x_panic();
5871. return; /* void function - cannot return -EBUSY here */
  5872. #else
  5873. break;
  5874. #endif
  5875. }
  5876. cnt--;
  5877. msleep(1);
  5878. }
  5879. }
  5880. /* Give HW time to discard old tx messages */
  5881. msleep(1);
  5882. bnx2x_set_eth_mac(bp, 0);
  5883. bnx2x_invalidate_uc_list(bp);
  5884. if (CHIP_IS_E1(bp))
  5885. bnx2x_invalidate_e1_mc_list(bp);
  5886. else {
  5887. bnx2x_invalidate_e1h_mc_list(bp);
  5888. REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
  5889. }
  5890. #ifdef BCM_CNIC
  5891. bnx2x_del_fcoe_eth_macs(bp);
  5892. #endif
  5893. if (unload_mode == UNLOAD_NORMAL)
  5894. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  5895. else if (bp->flags & NO_WOL_FLAG)
  5896. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
  5897. else if (bp->wol) {
  5898. u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
  5899. u8 *mac_addr = bp->dev->dev_addr;
  5900. u32 val;
  5901. /* The mac address is written to entries 1-4 to
  5902. preserve entry 0 which is used by the PMF */
  5903. u8 entry = (BP_E1HVN(bp) + 1)*8;
  5904. val = (mac_addr[0] << 8) | mac_addr[1];
  5905. EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
  5906. val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
  5907. (mac_addr[4] << 8) | mac_addr[5];
  5908. EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
  5909. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
  5910. } else
  5911. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5912. /* Close multi and leading connections.
  5913. Completions for ramrods are collected in a synchronous way */
  5914. for_each_queue(bp, i)
  5915. if (bnx2x_stop_client(bp, i))
  5916. #ifdef BNX2X_STOP_ON_ERROR
  5917. return;
  5918. #else
  5919. goto unload_error;
  5920. #endif
  5921. rc = bnx2x_func_stop(bp);
  5922. if (rc) {
  5923. BNX2X_ERR("Function stop failed!\n");
  5924. #ifdef BNX2X_STOP_ON_ERROR
  5925. return;
  5926. #else
  5927. goto unload_error;
  5928. #endif
  5929. }
  5930. #ifndef BNX2X_STOP_ON_ERROR
  5931. unload_error:
  5932. #endif
  5933. if (!BP_NOMCP(bp))
  5934. reset_code = bnx2x_fw_command(bp, reset_code, 0);
  5935. else {
  5936. DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
  5937. "%d, %d, %d\n", BP_PATH(bp),
  5938. load_count[BP_PATH(bp)][0],
  5939. load_count[BP_PATH(bp)][1],
  5940. load_count[BP_PATH(bp)][2]);
  5941. load_count[BP_PATH(bp)][0]--;
  5942. load_count[BP_PATH(bp)][1 + port]--;
  5943. DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
  5944. "%d, %d, %d\n", BP_PATH(bp),
  5945. load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
  5946. load_count[BP_PATH(bp)][2]);
  5947. if (load_count[BP_PATH(bp)][0] == 0)
  5948. reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
  5949. else if (load_count[BP_PATH(bp)][1 + port] == 0)
  5950. reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
  5951. else
  5952. reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
  5953. }
  5954. if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
  5955. (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
  5956. bnx2x__link_reset(bp);
  5957. /* Disable HW interrupts, NAPI */
  5958. bnx2x_netif_stop(bp, 1);
  5959. /* Release IRQs */
  5960. bnx2x_free_irq(bp);
  5961. /* Reset the chip */
  5962. bnx2x_reset_chip(bp, reset_code);
  5963. /* Report UNLOAD_DONE to MCP */
  5964. if (!BP_NOMCP(bp))
  5965. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
  5966. }
  5967. void bnx2x_disable_close_the_gate(struct bnx2x *bp)
  5968. {
  5969. u32 val;
  5970. DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
  5971. if (CHIP_IS_E1(bp)) {
  5972. int port = BP_PORT(bp);
  5973. u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  5974. MISC_REG_AEU_MASK_ATTN_FUNC_0;
  5975. val = REG_RD(bp, addr);
  5976. val &= ~(0x300);
  5977. REG_WR(bp, addr, val);
  5978. } else if (CHIP_IS_E1H(bp)) {
  5979. val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
  5980. val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
  5981. MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
  5982. REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
  5983. }
  5984. }
  5985. /* Close gates #2, #3 and #4: */
  5986. static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
  5987. {
  5988. u32 val, addr;
  5989. /* Gates #2 and #4a are closed/opened for "not E1" only */
  5990. if (!CHIP_IS_E1(bp)) {
  5991. /* #4 */
  5992. val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
  5993. REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
  5994. close ? (val | 0x1) : (val & (~(u32)1)));
  5995. /* #2 */
  5996. val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
  5997. REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
  5998. close ? (val | 0x1) : (val & (~(u32)1)));
  5999. }
  6000. /* #3 */
  6001. addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
  6002. val = REG_RD(bp, addr);
  6003. REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
  6004. DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
  6005. close ? "closing" : "opening");
  6006. mmiowb();
  6007. }
  6008. #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
  6009. static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
  6010. {
  6011. /* Do some magic... */
  6012. u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
  6013. *magic_val = val & SHARED_MF_CLP_MAGIC;
  6014. MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
  6015. }
  6016. /**
  6017. * bnx2x_clp_reset_done - restore the value of the `magic' bit.
  6018. *
  6019. * @bp: driver handle
  6020. * @magic_val: old value of the `magic' bit.
  6021. */
  6022. static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
  6023. {
  6024. /* Restore the `magic' bit value... */
  6025. u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
  6026. MF_CFG_WR(bp, shared_mf_config.clp_mb,
  6027. (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
  6028. }
  6029. /**
  6030. * bnx2x_reset_mcp_prep - prepare for MCP reset.
  6031. *
  6032. * @bp: driver handle
  6033. * @magic_val: old value of 'magic' bit.
  6034. *
  6035. * Takes care of CLP configurations.
  6036. */
  6037. static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
  6038. {
  6039. u32 shmem;
  6040. u32 validity_offset;
  6041. DP(NETIF_MSG_HW, "Starting\n");
  6042. /* Set `magic' bit in order to save MF config */
  6043. if (!CHIP_IS_E1(bp))
  6044. bnx2x_clp_reset_prep(bp, magic_val);
  6045. /* Get shmem offset */
  6046. shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
  6047. validity_offset = offsetof(struct shmem_region, validity_map[0]);
  6048. /* Clear validity map flags */
  6049. if (shmem > 0)
  6050. REG_WR(bp, shmem + validity_offset, 0);
  6051. }
  6052. #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
  6053. #define MCP_ONE_TIMEOUT 100 /* 100 ms */
  6054. /**
  6055. * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
  6056. *
  6057. * @bp: driver handle
  6058. */
  6059. static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
  6060. {
  6061. /* special handling for emulation and FPGA,
  6062. wait 10 times longer */
  6063. if (CHIP_REV_IS_SLOW(bp))
  6064. msleep(MCP_ONE_TIMEOUT*10);
  6065. else
  6066. msleep(MCP_ONE_TIMEOUT);
  6067. }
  6068. /*
  6069. * initializes bp->common.shmem_base and waits for validity signature to appear
  6070. */
  6071. static int bnx2x_init_shmem(struct bnx2x *bp)
  6072. {
  6073. int cnt = 0;
  6074. u32 val = 0;
  6075. do {
  6076. bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
  6077. if (bp->common.shmem_base) {
  6078. val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
  6079. if (val & SHR_MEM_VALIDITY_MB)
  6080. return 0;
  6081. }
  6082. bnx2x_mcp_wait_one(bp);
  6083. } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
  6084. BNX2X_ERR("BAD MCP validity signature\n");
  6085. return -ENODEV;
  6086. }
  6087. static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
  6088. {
  6089. int rc = bnx2x_init_shmem(bp);
  6090. /* Restore the `magic' bit value */
  6091. if (!CHIP_IS_E1(bp))
  6092. bnx2x_clp_reset_done(bp, magic_val);
  6093. return rc;
  6094. }
  6095. static void bnx2x_pxp_prep(struct bnx2x *bp)
  6096. {
  6097. if (!CHIP_IS_E1(bp)) {
  6098. REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
  6099. REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
  6100. REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
  6101. mmiowb();
  6102. }
  6103. }
  6104. /*
  6105. * Reset the whole chip except for:
  6106. * - PCIE core
  6107. * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
  6108. * one reset bit)
  6109. * - IGU
  6110. * - MISC (including AEU)
  6111. * - GRC
  6112. * - RBCN, RBCP
  6113. */
  6114. static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
  6115. {
  6116. u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
  6117. not_reset_mask1 =
  6118. MISC_REGISTERS_RESET_REG_1_RST_HC |
  6119. MISC_REGISTERS_RESET_REG_1_RST_PXPV |
  6120. MISC_REGISTERS_RESET_REG_1_RST_PXP;
  6121. not_reset_mask2 =
  6122. MISC_REGISTERS_RESET_REG_2_RST_MDIO |
  6123. MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
  6124. MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
  6125. MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
  6126. MISC_REGISTERS_RESET_REG_2_RST_RBCN |
  6127. MISC_REGISTERS_RESET_REG_2_RST_GRC |
  6128. MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
  6129. MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
  6130. reset_mask1 = 0xffffffff;
  6131. if (CHIP_IS_E1(bp))
  6132. reset_mask2 = 0xffff;
  6133. else
  6134. reset_mask2 = 0x1ffff;
  6135. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  6136. reset_mask1 & (~not_reset_mask1));
  6137. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
  6138. reset_mask2 & (~not_reset_mask2));
  6139. barrier();
  6140. mmiowb();
  6141. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
  6142. REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
  6143. mmiowb();
  6144. }
  6145. static int bnx2x_process_kill(struct bnx2x *bp)
  6146. {
  6147. int cnt = 1000;
  6148. u32 val = 0;
  6149. u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
  6150. /* Empty the Tetris buffer, wait for 1s */
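/* The constants below are presumed to be the idle indications of the PXP2
* read clients: SR and block credit counters back at their reset values,
* both ports reporting idle and the expansion-ROM request register fully
* set.
*/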
  6151. do {
  6152. sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
  6153. blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
  6154. port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
  6155. port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
  6156. pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
  6157. if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
  6158. ((port_is_idle_0 & 0x1) == 0x1) &&
  6159. ((port_is_idle_1 & 0x1) == 0x1) &&
  6160. (pgl_exp_rom2 == 0xffffffff))
  6161. break;
  6162. msleep(1);
  6163. } while (cnt-- > 0);
  6164. if (cnt <= 0) {
  6165. DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
  6166. " are still"
  6167. " outstanding read requests after 1s!\n");
  6168. DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
  6169. " port_is_idle_0=0x%08x,"
  6170. " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
  6171. sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
  6172. pgl_exp_rom2);
  6173. return -EAGAIN;
  6174. }
  6175. barrier();
  6176. /* Close gates #2, #3 and #4 */
  6177. bnx2x_set_234_gates(bp, true);
  6178. /* TBD: Indicate that "process kill" is in progress to MCP */
  6179. /* Clear "unprepared" bit */
  6180. REG_WR(bp, MISC_REG_UNPREPARED, 0);
  6181. barrier();
  6182. /* Make sure all is written to the chip before the reset */
  6183. mmiowb();
  6184. /* Wait for 1ms to empty GLUE and PCI-E core queues,
  6185. * PSWHST, GRC and PSWRD Tetris buffer.
  6186. */
  6187. msleep(1);
6188. /* Prepare for chip reset: */
  6189. /* MCP */
  6190. bnx2x_reset_mcp_prep(bp, &val);
  6191. /* PXP */
  6192. bnx2x_pxp_prep(bp);
  6193. barrier();
  6194. /* reset the chip */
  6195. bnx2x_process_kill_chip_reset(bp);
  6196. barrier();
  6197. /* Recover after reset: */
  6198. /* MCP */
  6199. if (bnx2x_reset_mcp_comp(bp, val))
  6200. return -EAGAIN;
  6201. /* PXP */
  6202. bnx2x_pxp_prep(bp);
  6203. /* Open the gates #2, #3 and #4 */
  6204. bnx2x_set_234_gates(bp, false);
6205. /* TBD: IGU/AEU preparation - bring back the AEU/IGU to a
  6206. * reset state, re-enable attentions. */
  6207. return 0;
  6208. }
  6209. static int bnx2x_leader_reset(struct bnx2x *bp)
  6210. {
  6211. int rc = 0;
  6212. /* Try to recover after the failure */
  6213. if (bnx2x_process_kill(bp)) {
6214. printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
  6215. bp->dev->name);
  6216. rc = -EAGAIN;
  6217. goto exit_leader_reset;
  6218. }
  6219. /* Clear "reset is in progress" bit and update the driver state */
  6220. bnx2x_set_reset_done(bp);
  6221. bp->recovery_state = BNX2X_RECOVERY_DONE;
  6222. exit_leader_reset:
  6223. bp->is_leader = 0;
  6224. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
  6225. smp_wmb();
  6226. return rc;
  6227. }
  6228. /* Assumption: runs under rtnl lock. This together with the fact
6229. * that it's called only from bnx2x_reset_task() ensures that it
  6230. * will never be called when netif_running(bp->dev) is false.
  6231. */
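/* Recovery state machine: in INIT the function tries to grab the leader
* lock and unloads itself; in WAIT the leader waits for all other functions
* to go down and then performs the "process kill" chip reset, while
* non-leaders wait for the reset to complete (or take over leadership) and
* then reload the NIC.
*/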
  6232. static void bnx2x_parity_recover(struct bnx2x *bp)
  6233. {
  6234. DP(NETIF_MSG_HW, "Handling parity\n");
  6235. while (1) {
  6236. switch (bp->recovery_state) {
  6237. case BNX2X_RECOVERY_INIT:
  6238. DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
  6239. /* Try to get a LEADER_LOCK HW lock */
  6240. if (bnx2x_trylock_hw_lock(bp,
  6241. HW_LOCK_RESOURCE_RESERVED_08))
  6242. bp->is_leader = 1;
  6243. /* Stop the driver */
  6244. /* If interface has been removed - break */
  6245. if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
  6246. return;
  6247. bp->recovery_state = BNX2X_RECOVERY_WAIT;
  6248. /* Ensure "is_leader" and "recovery_state"
  6249. * update values are seen on other CPUs
  6250. */
  6251. smp_wmb();
  6252. break;
  6253. case BNX2X_RECOVERY_WAIT:
  6254. DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
  6255. if (bp->is_leader) {
  6256. u32 load_counter = bnx2x_get_load_cnt(bp);
  6257. if (load_counter) {
  6258. /* Wait until all other functions get
  6259. * down.
  6260. */
  6261. schedule_delayed_work(&bp->reset_task,
  6262. HZ/10);
  6263. return;
  6264. } else {
  6265. /* If all other functions got down -
  6266. * try to bring the chip back to
  6267. * normal. In any case it's an exit
  6268. * point for a leader.
  6269. */
  6270. if (bnx2x_leader_reset(bp) ||
  6271. bnx2x_nic_load(bp, LOAD_NORMAL)) {
6272. printk(KERN_ERR "%s: Recovery "
  6273. "has failed. Power cycle is "
  6274. "needed.\n", bp->dev->name);
  6275. /* Disconnect this device */
  6276. netif_device_detach(bp->dev);
  6277. /* Block ifup for all function
  6278. * of this ASIC until
  6279. * "process kill" or power
  6280. * cycle.
  6281. */
  6282. bnx2x_set_reset_in_progress(bp);
  6283. /* Shut down the power */
  6284. bnx2x_set_power_state(bp,
  6285. PCI_D3hot);
  6286. return;
  6287. }
  6288. return;
  6289. }
  6290. } else { /* non-leader */
  6291. if (!bnx2x_reset_is_done(bp)) {
6292. /* Try to get the LEADER_LOCK HW lock, since
6293. * a former leader may have
6294. * been unloaded by the user or
6295. * may have released leadership for
6296. * another reason.
6297. */
  6298. if (bnx2x_trylock_hw_lock(bp,
  6299. HW_LOCK_RESOURCE_RESERVED_08)) {
  6300. /* I'm a leader now! Restart a
  6301. * switch case.
  6302. */
  6303. bp->is_leader = 1;
  6304. break;
  6305. }
  6306. schedule_delayed_work(&bp->reset_task,
  6307. HZ/10);
  6308. return;
  6309. } else { /* A leader has completed
  6310. * the "process kill". It's an exit
  6311. * point for a non-leader.
  6312. */
  6313. bnx2x_nic_load(bp, LOAD_NORMAL);
  6314. bp->recovery_state =
  6315. BNX2X_RECOVERY_DONE;
  6316. smp_wmb();
  6317. return;
  6318. }
  6319. }
  6320. default:
  6321. return;
  6322. }
  6323. }
  6324. }
  6325. /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
  6326. * scheduled on a general queue in order to prevent a deadlock.
  6327. */
  6328. static void bnx2x_reset_task(struct work_struct *work)
  6329. {
  6330. struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
  6331. #ifdef BNX2X_STOP_ON_ERROR
  6332. BNX2X_ERR("reset task called but STOP_ON_ERROR defined,"
  6333. " so reset not done to allow debug dump.\n"
  6334. KERN_ERR " You will need to reboot when done.\n");
  6335. return;
  6336. #endif
  6337. rtnl_lock();
  6338. if (!netif_running(bp->dev))
  6339. goto reset_task_exit;
  6340. if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
  6341. bnx2x_parity_recover(bp);
  6342. else {
  6343. bnx2x_nic_unload(bp, UNLOAD_NORMAL);
  6344. bnx2x_nic_load(bp, LOAD_NORMAL);
  6345. }
  6346. reset_task_exit:
  6347. rtnl_unlock();
  6348. }
  6349. /* end of nic load/unload */
  6350. /*
  6351. * Init service functions
  6352. */
  6353. static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
  6354. {
  6355. u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
  6356. u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
  6357. return base + (BP_ABS_FUNC(bp)) * stride;
  6358. }
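/* E.g. for absolute function N this yields
 * PXP2_REG_PGL_PRETEND_FUNC_F0 + N * (PXP2_REG_PGL_PRETEND_FUNC_F1 -
 * PXP2_REG_PGL_PRETEND_FUNC_F0), i.e. function N's pretend register,
 * assuming the per-function registers are laid out at a constant stride.
 */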
  6359. static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
  6360. {
  6361. u32 reg = bnx2x_get_pretend_reg(bp);
  6362. /* Flush all outstanding writes */
  6363. mmiowb();
  6364. /* Pretend to be function 0 */
  6365. REG_WR(bp, reg, 0);
  6366. REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
  6367. /* From now we are in the "like-E1" mode */
  6368. bnx2x_int_disable(bp);
  6369. /* Flush all outstanding writes */
  6370. mmiowb();
  6371. /* Restore the original function */
  6372. REG_WR(bp, reg, BP_ABS_FUNC(bp));
  6373. REG_RD(bp, reg);
  6374. }
  6375. static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
  6376. {
  6377. if (CHIP_IS_E1(bp))
  6378. bnx2x_int_disable(bp);
  6379. else
  6380. bnx2x_undi_int_disable_e1h(bp);
  6381. }
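/* bnx2x_undi_unload() checks whether a pre-OS UNDI driver left the
 * device initialized (MISC_REG_UNPREPARED == 1 and the normal doorbell
 * CID offset set to 0x7). If so, it requests an unload from the MCP for
 * port 0 (and, if needed, port 1), masks BRB input and AEU attentions,
 * resets the chip, brings the NIG back out of reset and restores the
 * port-swap straps, sends UNLOAD_DONE, and finally restores the
 * original pf_num and fw_seq.
 */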
  6382. static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
  6383. {
  6384. u32 val;
  6385. /* Check if there is any driver already loaded */
  6386. val = REG_RD(bp, MISC_REG_UNPREPARED);
  6387. if (val == 0x1) {
  6388. /* Check if it is the UNDI driver
  6389. * UNDI driver initializes CID offset for normal bell to 0x7
  6390. */
  6391. bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
  6392. val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
  6393. if (val == 0x7) {
  6394. u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  6395. /* save our pf_num */
  6396. int orig_pf_num = bp->pf_num;
  6397. u32 swap_en;
  6398. u32 swap_val;
  6399. /* clear the UNDI indication */
  6400. REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
  6401. BNX2X_DEV_INFO("UNDI is active! reset device\n");
  6402. /* try to unload UNDI on port 0 */
  6403. bp->pf_num = 0;
  6404. bp->fw_seq =
  6405. (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
  6406. DRV_MSG_SEQ_NUMBER_MASK);
  6407. reset_code = bnx2x_fw_command(bp, reset_code, 0);
  6408. /* if UNDI is loaded on the other port */
  6409. if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
  6410. /* send "DONE" for previous unload */
  6411. bnx2x_fw_command(bp,
  6412. DRV_MSG_CODE_UNLOAD_DONE, 0);
  6413. /* unload UNDI on port 1 */
  6414. bp->pf_num = 1;
  6415. bp->fw_seq =
  6416. (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
  6417. DRV_MSG_SEQ_NUMBER_MASK);
  6418. reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
  6419. bnx2x_fw_command(bp, reset_code, 0);
  6420. }
  6421. /* now it's safe to release the lock */
  6422. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
  6423. bnx2x_undi_int_disable(bp);
  6424. /* close input traffic and wait for it */
  6425. /* Do not rcv packets to BRB */
  6426. REG_WR(bp,
  6427. (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
  6428. NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
  6429. /* Do not direct rcv packets that are not for MCP to
  6430. * the BRB */
  6431. REG_WR(bp,
  6432. (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
  6433. NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
  6434. /* clear AEU */
  6435. REG_WR(bp,
  6436. (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
  6437. MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
  6438. msleep(10);
  6439. /* save NIG port swap info */
  6440. swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
  6441. swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
  6442. /* reset device */
  6443. REG_WR(bp,
  6444. GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
  6445. 0xd3ffffff);
  6446. REG_WR(bp,
  6447. GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
  6448. 0x1403);
  6449. /* take the NIG out of reset and restore swap values */
  6450. REG_WR(bp,
  6451. GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
  6452. MISC_REGISTERS_RESET_REG_1_RST_NIG);
  6453. REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
  6454. REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
  6455. /* send unload done to the MCP */
  6456. bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
  6457. /* restore our func and fw_seq */
  6458. bp->pf_num = orig_pf_num;
  6459. bp->fw_seq =
  6460. (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
  6461. DRV_MSG_SEQ_NUMBER_MASK);
  6462. } else
  6463. bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
  6464. }
  6465. }
  6466. static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
  6467. {
  6468. u32 val, val2, val3, val4, id;
  6469. u16 pmc;
  6470. /* Get the chip revision id and number. */
  6471. /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
  6472. val = REG_RD(bp, MISC_REG_CHIP_NUM);
  6473. id = ((val & 0xffff) << 16);
  6474. val = REG_RD(bp, MISC_REG_CHIP_REV);
  6475. id |= ((val & 0xf) << 12);
  6476. val = REG_RD(bp, MISC_REG_CHIP_METAL);
  6477. id |= ((val & 0xff) << 4);
  6478. val = REG_RD(bp, MISC_REG_BOND_ID);
  6479. id |= (val & 0xf);
  6480. bp->common.chip_id = id;
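/* Example (hypothetical register values): chip num 0x164e, rev 0x0,
 * metal 0x00 and bond_id 0x0 would assemble into chip_id 0x164e0000. */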
  6481. /* Set doorbell size */
  6482. bp->db_size = (1 << BNX2X_DB_SHIFT);
  6483. if (CHIP_IS_E2(bp)) {
  6484. val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
  6485. if ((val & 1) == 0)
  6486. val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
  6487. else
  6488. val = (val >> 1) & 1;
  6489. BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
  6490. "2_PORT_MODE");
  6491. bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
  6492. CHIP_2_PORT_MODE;
  6493. if (CHIP_MODE_IS_4_PORT(bp))
  6494. bp->pfid = (bp->pf_num >> 1); /* 0..3 */
  6495. else
  6496. bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
  6497. } else {
  6498. bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
  6499. bp->pfid = bp->pf_num; /* 0..7 */
  6500. }
  6501. /*
  6502. * Set the base FW non-default (fast path) status block id. This value is
  6503. * used to initialize the fw_sb_id saved on the fp/queue structure to
  6504. * determine the id used by the FW.
  6505. */
  6506. if (CHIP_IS_E1x(bp))
  6507. bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
  6508. else /* E2 */
  6509. bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
  6510. bp->link_params.chip_id = bp->common.chip_id;
  6511. BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
  6512. val = (REG_RD(bp, 0x2874) & 0x55);
  6513. if ((bp->common.chip_id & 0x1) ||
  6514. (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
  6515. bp->flags |= ONE_PORT_FLAG;
  6516. BNX2X_DEV_INFO("single port device\n");
  6517. }
  6518. val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
  6519. bp->common.flash_size = (NVRAM_1MB_SIZE <<
  6520. (val & MCPR_NVM_CFG4_FLASH_SIZE));
  6521. BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
  6522. bp->common.flash_size, bp->common.flash_size);
  6523. bnx2x_init_shmem(bp);
  6524. bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
  6525. MISC_REG_GENERIC_CR_1 :
  6526. MISC_REG_GENERIC_CR_0));
  6527. bp->link_params.shmem_base = bp->common.shmem_base;
  6528. bp->link_params.shmem2_base = bp->common.shmem2_base;
  6529. BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
  6530. bp->common.shmem_base, bp->common.shmem2_base);
  6531. if (!bp->common.shmem_base) {
  6532. BNX2X_DEV_INFO("MCP not active\n");
  6533. bp->flags |= NO_MCP_FLAG;
  6534. return;
  6535. }
  6536. bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
  6537. BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
  6538. bp->link_params.hw_led_mode = ((bp->common.hw_config &
  6539. SHARED_HW_CFG_LED_MODE_MASK) >>
  6540. SHARED_HW_CFG_LED_MODE_SHIFT);
  6541. bp->link_params.feature_config_flags = 0;
  6542. val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
  6543. if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
  6544. bp->link_params.feature_config_flags |=
  6545. FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
  6546. else
  6547. bp->link_params.feature_config_flags &=
  6548. ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
  6549. val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
  6550. bp->common.bc_ver = val;
  6551. BNX2X_DEV_INFO("bc_ver %X\n", val);
  6552. if (val < BNX2X_BC_VER) {
  6553. /* for now only warn
  6554. * later we might need to enforce this */
  6555. BNX2X_ERR("This driver needs bc_ver %X but found %X, "
  6556. "please upgrade BC\n", BNX2X_BC_VER, val);
  6557. }
  6558. bp->link_params.feature_config_flags |=
  6559. (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
  6560. FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
  6561. bp->link_params.feature_config_flags |=
  6562. (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
  6563. FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
  6564. pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
  6565. bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
  6566. BNX2X_DEV_INFO("%sWoL capable\n",
  6567. (bp->flags & NO_WOL_FLAG) ? "not " : "");
  6568. val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
  6569. val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
  6570. val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
  6571. val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
  6572. dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
  6573. val, val2, val3, val4);
  6574. }
  6575. #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
  6576. #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
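/* Each IGU CAM entry maps one IGU status block to an owning function:
 * IGU_FID() extracts the function id (with a PF/VF flag) and IGU_VEC()
 * the vector number; vector 0 is the function's default status block.
 * bnx2x_get_igu_cam_info() below walks the CAM to find this PF's
 * default SB id, its first non-default SB and how many SBs it owns.
 */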
  6577. static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
  6578. {
  6579. int pfid = BP_FUNC(bp);
  6580. int vn = BP_E1HVN(bp);
  6581. int igu_sb_id;
  6582. u32 val;
  6583. u8 fid;
  6584. bp->igu_base_sb = 0xff;
  6585. bp->igu_sb_cnt = 0;
  6586. if (CHIP_INT_MODE_IS_BC(bp)) {
  6587. bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
  6588. NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
  6589. bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
  6590. FP_SB_MAX_E1x;
  6591. bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
  6592. (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
  6593. return;
  6594. }
  6595. /* IGU in normal mode - read CAM */
  6596. for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
  6597. igu_sb_id++) {
  6598. val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
  6599. if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
  6600. continue;
  6601. fid = IGU_FID(val);
  6602. if ((fid & IGU_FID_ENCODE_IS_PF)) {
  6603. if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
  6604. continue;
  6605. if (IGU_VEC(val) == 0)
  6606. /* default status block */
  6607. bp->igu_dsb_id = igu_sb_id;
  6608. else {
  6609. if (bp->igu_base_sb == 0xff)
  6610. bp->igu_base_sb = igu_sb_id;
  6611. bp->igu_sb_cnt++;
  6612. }
  6613. }
  6614. }
  6615. bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
  6616. NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
  6617. if (bp->igu_sb_cnt == 0)
  6618. BNX2X_ERR("CAM configuration error\n");
  6619. }
  6620. static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
  6621. u32 switch_cfg)
  6622. {
  6623. int cfg_size = 0, idx, port = BP_PORT(bp);
  6624. /* Aggregation of supported attributes of all external phys */
  6625. bp->port.supported[0] = 0;
  6626. bp->port.supported[1] = 0;
  6627. switch (bp->link_params.num_phys) {
  6628. case 1:
  6629. bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
  6630. cfg_size = 1;
  6631. break;
  6632. case 2:
  6633. bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
  6634. cfg_size = 1;
  6635. break;
  6636. case 3:
  6637. if (bp->link_params.multi_phy_config &
  6638. PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
  6639. bp->port.supported[1] =
  6640. bp->link_params.phy[EXT_PHY1].supported;
  6641. bp->port.supported[0] =
  6642. bp->link_params.phy[EXT_PHY2].supported;
  6643. } else {
  6644. bp->port.supported[0] =
  6645. bp->link_params.phy[EXT_PHY1].supported;
  6646. bp->port.supported[1] =
  6647. bp->link_params.phy[EXT_PHY2].supported;
  6648. }
  6649. cfg_size = 2;
  6650. break;
  6651. }
  6652. if (!(bp->port.supported[0] || bp->port.supported[1])) {
  6653. BNX2X_ERR("NVRAM config error. BAD phy config. "
  6654. "PHY1 config 0x%x, PHY2 config 0x%x\n",
  6655. SHMEM_RD(bp,
  6656. dev_info.port_hw_config[port].external_phy_config),
  6657. SHMEM_RD(bp,
  6658. dev_info.port_hw_config[port].external_phy_config2));
  6659. return;
  6660. }
  6661. switch (switch_cfg) {
  6662. case SWITCH_CFG_1G:
  6663. bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
  6664. port*0x10);
  6665. BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
  6666. break;
  6667. case SWITCH_CFG_10G:
  6668. bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
  6669. port*0x18);
  6670. BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
  6671. break;
  6672. default:
  6673. BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
  6674. bp->port.link_config[0]);
  6675. return;
  6676. }
  6677. /* mask what we support according to speed_cap_mask per configuration */
  6678. for (idx = 0; idx < cfg_size; idx++) {
  6679. if (!(bp->link_params.speed_cap_mask[idx] &
  6680. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
  6681. bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
  6682. if (!(bp->link_params.speed_cap_mask[idx] &
  6683. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
  6684. bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
  6685. if (!(bp->link_params.speed_cap_mask[idx] &
  6686. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
  6687. bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
  6688. if (!(bp->link_params.speed_cap_mask[idx] &
  6689. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
  6690. bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
  6691. if (!(bp->link_params.speed_cap_mask[idx] &
  6692. PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
  6693. bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
  6694. SUPPORTED_1000baseT_Full);
  6695. if (!(bp->link_params.speed_cap_mask[idx] &
  6696. PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
  6697. bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
  6698. if (!(bp->link_params.speed_cap_mask[idx] &
  6699. PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
  6700. bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
  6701. }
  6702. BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
  6703. bp->port.supported[1]);
  6704. }
  6705. static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
  6706. {
  6707. u32 link_config, idx, cfg_size = 0;
  6708. bp->port.advertising[0] = 0;
  6709. bp->port.advertising[1] = 0;
  6710. switch (bp->link_params.num_phys) {
  6711. case 1:
  6712. case 2:
  6713. cfg_size = 1;
  6714. break;
  6715. case 3:
  6716. cfg_size = 2;
  6717. break;
  6718. }
  6719. for (idx = 0; idx < cfg_size; idx++) {
  6720. bp->link_params.req_duplex[idx] = DUPLEX_FULL;
  6721. link_config = bp->port.link_config[idx];
  6722. switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
  6723. case PORT_FEATURE_LINK_SPEED_AUTO:
  6724. if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
  6725. bp->link_params.req_line_speed[idx] =
  6726. SPEED_AUTO_NEG;
  6727. bp->port.advertising[idx] |=
  6728. bp->port.supported[idx];
  6729. } else {
  6730. /* force 10G, no AN */
  6731. bp->link_params.req_line_speed[idx] =
  6732. SPEED_10000;
  6733. bp->port.advertising[idx] |=
  6734. (ADVERTISED_10000baseT_Full |
  6735. ADVERTISED_FIBRE);
  6736. continue;
  6737. }
  6738. break;
  6739. case PORT_FEATURE_LINK_SPEED_10M_FULL:
  6740. if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
  6741. bp->link_params.req_line_speed[idx] =
  6742. SPEED_10;
  6743. bp->port.advertising[idx] |=
  6744. (ADVERTISED_10baseT_Full |
  6745. ADVERTISED_TP);
  6746. } else {
  6747. BNX2X_ERROR("NVRAM config error. "
  6748. "Invalid link_config 0x%x"
  6749. " speed_cap_mask 0x%x\n",
  6750. link_config,
  6751. bp->link_params.speed_cap_mask[idx]);
  6752. return;
  6753. }
  6754. break;
  6755. case PORT_FEATURE_LINK_SPEED_10M_HALF:
  6756. if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
  6757. bp->link_params.req_line_speed[idx] =
  6758. SPEED_10;
  6759. bp->link_params.req_duplex[idx] =
  6760. DUPLEX_HALF;
  6761. bp->port.advertising[idx] |=
  6762. (ADVERTISED_10baseT_Half |
  6763. ADVERTISED_TP);
  6764. } else {
  6765. BNX2X_ERROR("NVRAM config error. "
  6766. "Invalid link_config 0x%x"
  6767. " speed_cap_mask 0x%x\n",
  6768. link_config,
  6769. bp->link_params.speed_cap_mask[idx]);
  6770. return;
  6771. }
  6772. break;
  6773. case PORT_FEATURE_LINK_SPEED_100M_FULL:
  6774. if (bp->port.supported[idx] &
  6775. SUPPORTED_100baseT_Full) {
  6776. bp->link_params.req_line_speed[idx] =
  6777. SPEED_100;
  6778. bp->port.advertising[idx] |=
  6779. (ADVERTISED_100baseT_Full |
  6780. ADVERTISED_TP);
  6781. } else {
  6782. BNX2X_ERROR("NVRAM config error. "
  6783. "Invalid link_config 0x%x"
  6784. " speed_cap_mask 0x%x\n",
  6785. link_config,
  6786. bp->link_params.speed_cap_mask[idx]);
  6787. return;
  6788. }
  6789. break;
  6790. case PORT_FEATURE_LINK_SPEED_100M_HALF:
  6791. if (bp->port.supported[idx] &
  6792. SUPPORTED_100baseT_Half) {
  6793. bp->link_params.req_line_speed[idx] =
  6794. SPEED_100;
  6795. bp->link_params.req_duplex[idx] =
  6796. DUPLEX_HALF;
  6797. bp->port.advertising[idx] |=
  6798. (ADVERTISED_100baseT_Half |
  6799. ADVERTISED_TP);
  6800. } else {
  6801. BNX2X_ERROR("NVRAM config error. "
  6802. "Invalid link_config 0x%x"
  6803. " speed_cap_mask 0x%x\n",
  6804. link_config,
  6805. bp->link_params.speed_cap_mask[idx]);
  6806. return;
  6807. }
  6808. break;
  6809. case PORT_FEATURE_LINK_SPEED_1G:
  6810. if (bp->port.supported[idx] &
  6811. SUPPORTED_1000baseT_Full) {
  6812. bp->link_params.req_line_speed[idx] =
  6813. SPEED_1000;
  6814. bp->port.advertising[idx] |=
  6815. (ADVERTISED_1000baseT_Full |
  6816. ADVERTISED_TP);
  6817. } else {
  6818. BNX2X_ERROR("NVRAM config error. "
  6819. "Invalid link_config 0x%x"
  6820. " speed_cap_mask 0x%x\n",
  6821. link_config,
  6822. bp->link_params.speed_cap_mask[idx]);
  6823. return;
  6824. }
  6825. break;
  6826. case PORT_FEATURE_LINK_SPEED_2_5G:
  6827. if (bp->port.supported[idx] &
  6828. SUPPORTED_2500baseX_Full) {
  6829. bp->link_params.req_line_speed[idx] =
  6830. SPEED_2500;
  6831. bp->port.advertising[idx] |=
  6832. (ADVERTISED_2500baseX_Full |
  6833. ADVERTISED_TP);
  6834. } else {
  6835. BNX2X_ERROR("NVRAM config error. "
  6836. "Invalid link_config 0x%x"
  6837. " speed_cap_mask 0x%x\n",
  6838. link_config,
  6839. bp->link_params.speed_cap_mask[idx]);
  6840. return;
  6841. }
  6842. break;
  6843. case PORT_FEATURE_LINK_SPEED_10G_CX4:
  6844. case PORT_FEATURE_LINK_SPEED_10G_KX4:
  6845. case PORT_FEATURE_LINK_SPEED_10G_KR:
  6846. if (bp->port.supported[idx] &
  6847. SUPPORTED_10000baseT_Full) {
  6848. bp->link_params.req_line_speed[idx] =
  6849. SPEED_10000;
  6850. bp->port.advertising[idx] |=
  6851. (ADVERTISED_10000baseT_Full |
  6852. ADVERTISED_FIBRE);
  6853. } else {
  6854. BNX2X_ERROR("NVRAM config error. "
  6855. "Invalid link_config 0x%x"
  6856. " speed_cap_mask 0x%x\n",
  6857. link_config,
  6858. bp->link_params.speed_cap_mask[idx]);
  6859. return;
  6860. }
  6861. break;
  6862. default:
  6863. BNX2X_ERROR("NVRAM config error. "
  6864. "BAD link speed link_config 0x%x\n",
  6865. link_config);
  6866. bp->link_params.req_line_speed[idx] =
  6867. SPEED_AUTO_NEG;
  6868. bp->port.advertising[idx] =
  6869. bp->port.supported[idx];
  6870. break;
  6871. }
  6872. bp->link_params.req_flow_ctrl[idx] = (link_config &
  6873. PORT_FEATURE_FLOW_CONTROL_MASK);
  6874. if ((bp->link_params.req_flow_ctrl[idx] ==
  6875. BNX2X_FLOW_CTRL_AUTO) &&
  6876. !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
  6877. bp->link_params.req_flow_ctrl[idx] =
  6878. BNX2X_FLOW_CTRL_NONE;
  6879. }
  6880. BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
  6881. " 0x%x advertising 0x%x\n",
  6882. bp->link_params.req_line_speed[idx],
  6883. bp->link_params.req_duplex[idx],
  6884. bp->link_params.req_flow_ctrl[idx],
  6885. bp->port.advertising[idx]);
  6886. }
  6887. }
  6888. static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
  6889. {
  6890. mac_hi = cpu_to_be16(mac_hi);
  6891. mac_lo = cpu_to_be32(mac_lo);
  6892. memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
  6893. memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
  6894. }
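/* Example (hypothetical values): mac_hi = 0x0010 and mac_lo = 0x18a1b2c3
 * produce the address 00:10:18:a1:b2:c3 in mac_buf. */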
  6895. static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
  6896. {
  6897. int port = BP_PORT(bp);
  6898. u32 config;
  6899. u32 ext_phy_type, ext_phy_config;
  6900. bp->link_params.bp = bp;
  6901. bp->link_params.port = port;
  6902. bp->link_params.lane_config =
  6903. SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
  6904. bp->link_params.speed_cap_mask[0] =
  6905. SHMEM_RD(bp,
  6906. dev_info.port_hw_config[port].speed_capability_mask);
  6907. bp->link_params.speed_cap_mask[1] =
  6908. SHMEM_RD(bp,
  6909. dev_info.port_hw_config[port].speed_capability_mask2);
  6910. bp->port.link_config[0] =
  6911. SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
  6912. bp->port.link_config[1] =
  6913. SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
  6914. bp->link_params.multi_phy_config =
  6915. SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
  6916. /* If the device is capable of WoL, set the default state according
  6917. * to the HW
  6918. */
  6919. config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
  6920. bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
  6921. (config & PORT_FEATURE_WOL_ENABLED));
  6922. BNX2X_DEV_INFO("lane_config 0x%08x "
  6923. "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
  6924. bp->link_params.lane_config,
  6925. bp->link_params.speed_cap_mask[0],
  6926. bp->port.link_config[0]);
  6927. bp->link_params.switch_cfg = (bp->port.link_config[0] &
  6928. PORT_FEATURE_CONNECTED_SWITCH_MASK);
  6929. bnx2x_phy_probe(&bp->link_params);
  6930. bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
  6931. bnx2x_link_settings_requested(bp);
  6932. /*
  6933. * If connected directly, work with the internal PHY, otherwise, work
  6934. * with the external PHY
  6935. */
  6936. ext_phy_config =
  6937. SHMEM_RD(bp,
  6938. dev_info.port_hw_config[port].external_phy_config);
  6939. ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
  6940. if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
  6941. bp->mdio.prtad = bp->port.phy_addr;
  6942. else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
  6943. (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
  6944. bp->mdio.prtad =
  6945. XGXS_EXT_PHY_ADDR(ext_phy_config);
  6946. /*
  6947. * Check if a hw lock is required to access the MDC/MDIO bus to the PHY(s).
  6948. * In MF mode, it is set to cover self test cases
  6949. */
  6950. if (IS_MF(bp))
  6951. bp->port.need_hw_lock = 1;
  6952. else
  6953. bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
  6954. bp->common.shmem_base,
  6955. bp->common.shmem2_base);
  6956. }
  6957. #ifdef BCM_CNIC
  6958. static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
  6959. {
  6960. u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
  6961. drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
  6962. u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
  6963. drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
  6964. /* Get the number of maximum allowed iSCSI and FCoE connections */
  6965. bp->cnic_eth_dev.max_iscsi_conn =
  6966. (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
  6967. BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
  6968. bp->cnic_eth_dev.max_fcoe_conn =
  6969. (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
  6970. BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
  6971. BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
  6972. bp->cnic_eth_dev.max_iscsi_conn,
  6973. bp->cnic_eth_dev.max_fcoe_conn);
  6974. /* If the maximum allowed number of connections is zero -
  6975. * disable the feature.
  6976. */
  6977. if (!bp->cnic_eth_dev.max_iscsi_conn)
  6978. bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
  6979. if (!bp->cnic_eth_dev.max_fcoe_conn)
  6980. bp->flags |= NO_FCOE_FLAG;
  6981. }
  6982. #endif
  6983. static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
  6984. {
  6985. u32 val, val2;
  6986. int func = BP_ABS_FUNC(bp);
  6987. int port = BP_PORT(bp);
  6988. #ifdef BCM_CNIC
  6989. u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
  6990. u8 *fip_mac = bp->fip_mac;
  6991. #endif
  6992. if (BP_NOMCP(bp)) {
  6993. BNX2X_ERROR("warning: random MAC workaround active\n");
  6994. random_ether_addr(bp->dev->dev_addr);
  6995. } else if (IS_MF(bp)) {
  6996. val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
  6997. val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
  6998. if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
  6999. (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
  7000. bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
  7001. #ifdef BCM_CNIC
  7002. /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
  7003. * FCoE MAC is missing, the corresponding feature should be disabled.
  7004. */
  7005. if (IS_MF_SI(bp)) {
  7006. u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
  7007. if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
  7008. val2 = MF_CFG_RD(bp, func_ext_config[func].
  7009. iscsi_mac_addr_upper);
  7010. val = MF_CFG_RD(bp, func_ext_config[func].
  7011. iscsi_mac_addr_lower);
  7012. BNX2X_DEV_INFO("Read iSCSI MAC: "
  7013. "0x%x:0x%04x\n", val2, val);
  7014. bnx2x_set_mac_buf(iscsi_mac, val, val2);
  7015. } else
  7016. bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
  7017. if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
  7018. val2 = MF_CFG_RD(bp, func_ext_config[func].
  7019. fcoe_mac_addr_upper);
  7020. val = MF_CFG_RD(bp, func_ext_config[func].
  7021. fcoe_mac_addr_lower);
  7022. BNX2X_DEV_INFO("Read FCoE MAC to "
  7023. "0x%x:0x%04x\n", val2, val);
  7024. bnx2x_set_mac_buf(fip_mac, val, val2);
  7025. } else
  7026. bp->flags |= NO_FCOE_FLAG;
  7027. }
  7028. #endif
  7029. } else {
  7030. /* in SF read MACs from port configuration */
  7031. val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
  7032. val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
  7033. bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
  7034. #ifdef BCM_CNIC
  7035. val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
  7036. iscsi_mac_upper);
  7037. val = SHMEM_RD(bp, dev_info.port_hw_config[port].
  7038. iscsi_mac_lower);
  7039. bnx2x_set_mac_buf(iscsi_mac, val, val2);
  7040. #endif
  7041. }
  7042. memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
  7043. memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
  7044. #ifdef BCM_CNIC
  7045. /* Set the FCoE MAC in modes other than MF_SI */
  7046. if (!CHIP_IS_E1x(bp)) {
  7047. if (IS_MF_SD(bp))
  7048. memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
  7049. else if (!IS_MF(bp))
  7050. memcpy(fip_mac, iscsi_mac, ETH_ALEN);
  7051. }
  7052. /* Disable iSCSI if MAC configuration is
  7053. * invalid.
  7054. */
  7055. if (!is_valid_ether_addr(iscsi_mac)) {
  7056. bp->flags |= NO_ISCSI_FLAG;
  7057. memset(iscsi_mac, 0, ETH_ALEN);
  7058. }
  7059. /* Disable FCoE if MAC configuration is
  7060. * invalid.
  7061. */
  7062. if (!is_valid_ether_addr(fip_mac)) {
  7063. bp->flags |= NO_FCOE_FLAG;
  7064. memset(bp->fip_mac, 0, ETH_ALEN);
  7065. }
  7066. #endif
  7067. }
  7068. static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
  7069. {
  7070. int /*abs*/func = BP_ABS_FUNC(bp);
  7071. int vn;
  7072. u32 val = 0;
  7073. int rc = 0;
  7074. bnx2x_get_common_hwinfo(bp);
  7075. if (CHIP_IS_E1x(bp)) {
  7076. bp->common.int_block = INT_BLOCK_HC;
  7077. bp->igu_dsb_id = DEF_SB_IGU_ID;
  7078. bp->igu_base_sb = 0;
  7079. bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
  7080. NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
  7081. } else {
  7082. bp->common.int_block = INT_BLOCK_IGU;
  7083. val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
  7084. if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
  7085. DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
  7086. bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
  7087. } else
  7088. DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
  7089. bnx2x_get_igu_cam_info(bp);
  7090. }
  7091. DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
  7092. bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
  7093. /*
  7094. * Initialize MF configuration
  7095. */
  7096. bp->mf_ov = 0;
  7097. bp->mf_mode = 0;
  7098. vn = BP_E1HVN(bp);
  7099. if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
  7100. DP(NETIF_MSG_PROBE,
  7101. "shmem2base 0x%x, size %d, mfcfg offset %d\n",
  7102. bp->common.shmem2_base, SHMEM2_RD(bp, size),
  7103. (u32)offsetof(struct shmem2_region, mf_cfg_addr));
  7104. if (SHMEM2_HAS(bp, mf_cfg_addr))
  7105. bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
  7106. else
  7107. bp->common.mf_cfg_base = bp->common.shmem_base +
  7108. offsetof(struct shmem_region, func_mb) +
  7109. E1H_FUNC_MAX * sizeof(struct drv_func_mb);
  7110. /*
  7111. * get mf configuration:
  7112. * 1. existence of MF configuration
  7113. * 2. MAC address must be legal (check only upper bytes)
  7114. * for Switch-Independent mode;
  7115. * OVLAN must be legal for Switch-Dependent mode
  7116. * 3. SF_MODE configures specific MF mode
  7117. */
  7118. if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
  7119. /* get mf configuration */
  7120. val = SHMEM_RD(bp,
  7121. dev_info.shared_feature_config.config);
  7122. val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
  7123. switch (val) {
  7124. case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
  7125. val = MF_CFG_RD(bp, func_mf_config[func].
  7126. mac_upper);
  7127. /* check for legal mac (upper bytes)*/
  7128. if (val != 0xffff) {
  7129. bp->mf_mode = MULTI_FUNCTION_SI;
  7130. bp->mf_config[vn] = MF_CFG_RD(bp,
  7131. func_mf_config[func].config);
  7132. } else
  7133. DP(NETIF_MSG_PROBE, "illegal MAC "
  7134. "address for SI\n");
  7135. break;
  7136. case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
  7137. /* get OV configuration */
  7138. val = MF_CFG_RD(bp,
  7139. func_mf_config[FUNC_0].e1hov_tag);
  7140. val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
  7141. if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
  7142. bp->mf_mode = MULTI_FUNCTION_SD;
  7143. bp->mf_config[vn] = MF_CFG_RD(bp,
  7144. func_mf_config[func].config);
  7145. } else
  7146. DP(NETIF_MSG_PROBE, "illegal OV for "
  7147. "SD\n");
  7148. break;
  7149. default:
  7150. /* Unknown configuration: reset mf_config */
  7151. bp->mf_config[vn] = 0;
  7152. DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
  7153. val);
  7154. }
  7155. }
  7156. BNX2X_DEV_INFO("%s function mode\n",
  7157. IS_MF(bp) ? "multi" : "single");
  7158. switch (bp->mf_mode) {
  7159. case MULTI_FUNCTION_SD:
  7160. val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
  7161. FUNC_MF_CFG_E1HOV_TAG_MASK;
  7162. if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
  7163. bp->mf_ov = val;
  7164. BNX2X_DEV_INFO("MF OV for func %d is %d"
  7165. " (0x%04x)\n", func,
  7166. bp->mf_ov, bp->mf_ov);
  7167. } else {
  7168. BNX2X_ERR("No valid MF OV for func %d,"
  7169. " aborting\n", func);
  7170. rc = -EPERM;
  7171. }
  7172. break;
  7173. case MULTI_FUNCTION_SI:
  7174. BNX2X_DEV_INFO("func %d is in MF "
  7175. "switch-independent mode\n", func);
  7176. break;
  7177. default:
  7178. if (vn) {
  7179. BNX2X_ERR("VN %d in single function mode,"
  7180. " aborting\n", vn);
  7181. rc = -EPERM;
  7182. }
  7183. break;
  7184. }
  7185. }
  7186. /* adjust igu_sb_cnt to MF for E1x */
  7187. if (CHIP_IS_E1x(bp) && IS_MF(bp))
  7188. bp->igu_sb_cnt /= E1HVN_MAX;
  7189. /*
  7190. * adjust E2 sb count: to be removed once the FW supports
  7191. * more than 16 L2 clients
  7192. */
  7193. #define MAX_L2_CLIENTS 16
  7194. if (CHIP_IS_E2(bp))
  7195. bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
  7196. MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
  7197. if (!BP_NOMCP(bp)) {
  7198. bnx2x_get_port_hwinfo(bp);
  7199. bp->fw_seq =
  7200. (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
  7201. DRV_MSG_SEQ_NUMBER_MASK);
  7202. BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
  7203. }
  7204. /* Get MAC addresses */
  7205. bnx2x_get_mac_hwinfo(bp);
  7206. #ifdef BCM_CNIC
  7207. bnx2x_get_cnic_info(bp);
  7208. #endif
  7209. return rc;
  7210. }
  7211. static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
  7212. {
  7213. int cnt, i, block_end, rodi;
  7214. char vpd_data[BNX2X_VPD_LEN+1];
  7215. char str_id_reg[VENDOR_ID_LEN+1];
  7216. char str_id_cap[VENDOR_ID_LEN+1];
  7217. u8 len;
  7218. cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
  7219. memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
  7220. if (cnt < BNX2X_VPD_LEN)
  7221. goto out_not_found;
  7222. i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
  7223. PCI_VPD_LRDT_RO_DATA);
  7224. if (i < 0)
  7225. goto out_not_found;
  7226. block_end = i + PCI_VPD_LRDT_TAG_SIZE +
  7227. pci_vpd_lrdt_size(&vpd_data[i]);
  7228. i += PCI_VPD_LRDT_TAG_SIZE;
  7229. if (block_end > BNX2X_VPD_LEN)
  7230. goto out_not_found;
  7231. rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
  7232. PCI_VPD_RO_KEYWORD_MFR_ID);
  7233. if (rodi < 0)
  7234. goto out_not_found;
  7235. len = pci_vpd_info_field_size(&vpd_data[rodi]);
  7236. if (len != VENDOR_ID_LEN)
  7237. goto out_not_found;
  7238. rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
  7239. /* vendor specific info */
  7240. snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
  7241. snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
  7242. if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
  7243. !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
  7244. rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
  7245. PCI_VPD_RO_KEYWORD_VENDOR0);
  7246. if (rodi >= 0) {
  7247. len = pci_vpd_info_field_size(&vpd_data[rodi]);
  7248. rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
  7249. if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
  7250. memcpy(bp->fw_ver, &vpd_data[rodi], len);
  7251. bp->fw_ver[len] = ' ';
  7252. }
  7253. }
  7254. return;
  7255. }
  7256. out_not_found:
  7257. return;
  7258. }
  7259. static int __devinit bnx2x_init_bp(struct bnx2x *bp)
  7260. {
  7261. int func;
  7262. int timer_interval;
  7263. int rc;
  7264. /* Disable interrupt handling until HW is initialized */
  7265. atomic_set(&bp->intr_sem, 1);
  7266. smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
  7267. mutex_init(&bp->port.phy_mutex);
  7268. mutex_init(&bp->fw_mb_mutex);
  7269. spin_lock_init(&bp->stats_lock);
  7270. #ifdef BCM_CNIC
  7271. mutex_init(&bp->cnic_mutex);
  7272. #endif
  7273. INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
  7274. INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
  7275. rc = bnx2x_get_hwinfo(bp);
  7276. if (!rc)
  7277. rc = bnx2x_alloc_mem_bp(bp);
  7278. bnx2x_read_fwinfo(bp);
  7279. func = BP_FUNC(bp);
  7280. /* need to reset chip if undi was active */
  7281. if (!BP_NOMCP(bp))
  7282. bnx2x_undi_unload(bp);
  7283. if (CHIP_REV_IS_FPGA(bp))
  7284. dev_err(&bp->pdev->dev, "FPGA detected\n");
  7285. if (BP_NOMCP(bp) && (func == 0))
  7286. dev_err(&bp->pdev->dev, "MCP disabled, "
  7287. "must load devices in order!\n");
  7288. bp->multi_mode = multi_mode;
  7289. bp->int_mode = int_mode;
  7290. /* Set TPA flags */
  7291. if (disable_tpa) {
  7292. bp->flags &= ~TPA_ENABLE_FLAG;
  7293. bp->dev->features &= ~NETIF_F_LRO;
  7294. } else {
  7295. bp->flags |= TPA_ENABLE_FLAG;
  7296. bp->dev->features |= NETIF_F_LRO;
  7297. }
  7298. bp->disable_tpa = disable_tpa;
  7299. if (CHIP_IS_E1(bp))
  7300. bp->dropless_fc = 0;
  7301. else
  7302. bp->dropless_fc = dropless_fc;
  7303. bp->mrrs = mrrs;
  7304. bp->tx_ring_size = MAX_TX_AVAIL;
  7305. /* make sure that the numbers are in the right granularity */
  7306. bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
  7307. bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
  7308. timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
  7309. bp->current_interval = (poll ? poll : timer_interval);
  7310. init_timer(&bp->timer);
  7311. bp->timer.expires = jiffies + bp->current_interval;
  7312. bp->timer.data = (unsigned long) bp;
  7313. bp->timer.function = bnx2x_timer;
  7314. bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
  7315. bnx2x_dcbx_init_params(bp);
  7316. return rc;
  7317. }
  7318. /****************************************************************************
  7319. * General service functions
  7320. ****************************************************************************/
  7321. /* called with rtnl_lock */
  7322. static int bnx2x_open(struct net_device *dev)
  7323. {
  7324. struct bnx2x *bp = netdev_priv(dev);
  7325. netif_carrier_off(dev);
  7326. bnx2x_set_power_state(bp, PCI_D0);
  7327. if (!bnx2x_reset_is_done(bp)) {
  7328. do {
  7329. /* Reset the MCP mailbox sequence if there is an ongoing
  7330. * recovery
  7331. */
  7332. bp->fw_seq = 0;
  7333. /* If it's the first function to load and "reset done"
  7334. * is still not cleared, it may mean the previous recovery
  7335. * never completed. We don't check the attention state here
  7336. * because it may have already been cleared by a "common"
  7337. * reset, but we shall proceed with "process kill" anyway.
  7338. */
  7339. if ((bnx2x_get_load_cnt(bp) == 0) &&
  7340. bnx2x_trylock_hw_lock(bp,
  7341. HW_LOCK_RESOURCE_RESERVED_08) &&
  7342. (!bnx2x_leader_reset(bp))) {
  7343. DP(NETIF_MSG_HW, "Recovered in open\n");
  7344. break;
  7345. }
  7346. bnx2x_set_power_state(bp, PCI_D3hot);
  7347. printk(KERN_ERR"%s: Recovery flow hasn't been properly"
  7348. " completed yet. Try again later. If u still see this"
  7349. " message after a few retries then power cycle is"
  7350. " required.\n", bp->dev->name);
  7351. return -EAGAIN;
  7352. } while (0);
  7353. }
  7354. bp->recovery_state = BNX2X_RECOVERY_DONE;
  7355. return bnx2x_nic_load(bp, LOAD_OPEN);
  7356. }
  7357. /* called with rtnl_lock */
  7358. static int bnx2x_close(struct net_device *dev)
  7359. {
  7360. struct bnx2x *bp = netdev_priv(dev);
  7361. /* Unload the driver, release IRQs */
  7362. bnx2x_nic_unload(bp, UNLOAD_CLOSE);
  7363. bnx2x_set_power_state(bp, PCI_D3hot);
  7364. return 0;
  7365. }
  7366. #define E1_MAX_UC_LIST 29
  7367. #define E1H_MAX_UC_LIST 30
  7368. #define E2_MAX_UC_LIST 14
  7369. static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
  7370. {
  7371. if (CHIP_IS_E1(bp))
  7372. return E1_MAX_UC_LIST;
  7373. else if (CHIP_IS_E1H(bp))
  7374. return E1H_MAX_UC_LIST;
  7375. else
  7376. return E2_MAX_UC_LIST;
  7377. }
  7378. static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
  7379. {
  7380. if (CHIP_IS_E1(bp))
  7381. /* CAM Entries for Port0:
  7382. * 0 - prim ETH MAC
  7383. * 1 - BCAST MAC
  7384. * 2 - iSCSI L2 ring ETH MAC
  7385. * 3-31 - UC MACs
  7386. *
  7387. * Port1 entries are allocated the same way starting from
  7388. * entry 32.
  7389. */
  7390. return 3 + 32 * BP_PORT(bp);
  7391. else if (CHIP_IS_E1H(bp)) {
  7392. /* CAM Entries:
  7393. * 0-7 - prim ETH MAC for each function
  7394. * 8-15 - iSCSI L2 ring ETH MAC for each function
  7395. * 16-255 - UC MAC lists for each function
  7396. *
  7397. * Remark: There is no FCoE support for E1H, thus FCoE related
  7398. * MACs are not considered.
  7399. */
  7400. return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
  7401. bnx2x_max_uc_list(bp) * BP_FUNC(bp);
  7402. } else {
  7403. /* CAM Entries (there is a separate CAM per engine):
  7404. * 0-3 - prim ETH MAC for each function
  7405. * 4-7 - iSCSI L2 ring ETH MAC for each function
  7406. * 8-11 - FIP ucast L2 MAC for each function
  7407. * 12-15 - ALL_ENODE_MACS mcast MAC for each function
  7408. * 16-71 - UC MAC lists for each function
  7409. */
  7410. u8 func_idx =
  7411. (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
  7412. return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
  7413. bnx2x_max_uc_list(bp) * func_idx;
  7414. }
  7415. }
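/* For example, on E1H (assuming CAM_ISCSI_ETH_LINE == 1, so the first
 * 16 entries hold the per-function primary and iSCSI MACs) function 2
 * gets its UC MAC list at offset 16 + 30 * 2 = 76. */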
  7416. /* Set the uc list; do not wait, as waiting implies sleep and
  7417. * set_rx_mode can be invoked from a non-sleepable context.
  7418. *
  7419. * Instead we use the same ramrod data buffer each time we need
  7420. * to configure a list of addresses, and use the fact that the
  7421. * list of MACs is changed in an incremental way and that the
  7422. * function is called under the netif_addr_lock. A temporary
  7423. * inconsistent CAM configuration (possible in case of very fast
  7424. * sequence of add/del/add on the host side) will shortly be
  7425. * restored by the handler of the last ramrod.
  7426. */
  7427. static int bnx2x_set_uc_list(struct bnx2x *bp)
  7428. {
  7429. int i = 0, old;
  7430. struct net_device *dev = bp->dev;
  7431. u8 offset = bnx2x_uc_list_cam_offset(bp);
  7432. struct netdev_hw_addr *ha;
  7433. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
  7434. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
  7435. if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
  7436. return -EINVAL;
  7437. netdev_for_each_uc_addr(ha, dev) {
  7438. /* copy mac */
  7439. config_cmd->config_table[i].msb_mac_addr =
  7440. swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
  7441. config_cmd->config_table[i].middle_mac_addr =
  7442. swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
  7443. config_cmd->config_table[i].lsb_mac_addr =
  7444. swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
  7445. config_cmd->config_table[i].vlan_id = 0;
  7446. config_cmd->config_table[i].pf_id = BP_FUNC(bp);
  7447. config_cmd->config_table[i].clients_bit_vector =
  7448. cpu_to_le32(1 << BP_L_ID(bp));
  7449. SET_FLAG(config_cmd->config_table[i].flags,
  7450. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  7451. T_ETH_MAC_COMMAND_SET);
  7452. DP(NETIF_MSG_IFUP,
  7453. "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
  7454. config_cmd->config_table[i].msb_mac_addr,
  7455. config_cmd->config_table[i].middle_mac_addr,
  7456. config_cmd->config_table[i].lsb_mac_addr);
  7457. i++;
  7458. /* Set uc MAC in NIG */
  7459. bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
  7460. LLH_CAM_ETH_LINE + i);
  7461. }
  7462. old = config_cmd->hdr.length;
  7463. if (old > i) {
  7464. for (; i < old; i++) {
  7465. if (CAM_IS_INVALID(config_cmd->
  7466. config_table[i])) {
  7467. /* already invalidated */
  7468. break;
  7469. }
  7470. /* invalidate */
  7471. SET_FLAG(config_cmd->config_table[i].flags,
  7472. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  7473. T_ETH_MAC_COMMAND_INVALIDATE);
  7474. }
  7475. }
  7476. wmb();
  7477. config_cmd->hdr.length = i;
  7478. config_cmd->hdr.offset = offset;
  7479. config_cmd->hdr.client_id = 0xff;
  7480. /* Mark that this ramrod doesn't use bp->set_mac_pending for
  7481. * synchronization.
  7482. */
  7483. config_cmd->hdr.echo = 0;
  7484. mb();
  7485. return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  7486. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  7487. }
  7488. void bnx2x_invalidate_uc_list(struct bnx2x *bp)
  7489. {
  7490. int i;
  7491. struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
  7492. dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
  7493. int ramrod_flags = WAIT_RAMROD_COMMON;
  7494. u8 offset = bnx2x_uc_list_cam_offset(bp);
  7495. u8 max_list_size = bnx2x_max_uc_list(bp);
  7496. for (i = 0; i < max_list_size; i++) {
  7497. SET_FLAG(config_cmd->config_table[i].flags,
  7498. MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
  7499. T_ETH_MAC_COMMAND_INVALIDATE);
  7500. bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
  7501. }
  7502. wmb();
  7503. config_cmd->hdr.length = max_list_size;
  7504. config_cmd->hdr.offset = offset;
  7505. config_cmd->hdr.client_id = 0xff;
  7506. /* We'll wait for a completion this time... */
  7507. config_cmd->hdr.echo = 1;
  7508. bp->set_mac_pending = 1;
  7509. mb();
  7510. bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
  7511. U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
  7512. /* Wait for a completion */
  7513. bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
  7514. ramrod_flags);
  7515. }
  7516. static inline int bnx2x_set_mc_list(struct bnx2x *bp)
  7517. {
  7518. /* some multicasts */
  7519. if (CHIP_IS_E1(bp)) {
  7520. return bnx2x_set_e1_mc_list(bp);
  7521. } else { /* E1H and newer */
  7522. return bnx2x_set_e1h_mc_list(bp);
  7523. }
  7524. }
  7525. /* called with netif_tx_lock from dev_mcast.c */
  7526. void bnx2x_set_rx_mode(struct net_device *dev)
  7527. {
  7528. struct bnx2x *bp = netdev_priv(dev);
  7529. u32 rx_mode = BNX2X_RX_MODE_NORMAL;
  7530. if (bp->state != BNX2X_STATE_OPEN) {
  7531. DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
  7532. return;
  7533. }
  7534. DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
  7535. if (dev->flags & IFF_PROMISC)
  7536. rx_mode = BNX2X_RX_MODE_PROMISC;
  7537. else if (dev->flags & IFF_ALLMULTI)
  7538. rx_mode = BNX2X_RX_MODE_ALLMULTI;
  7539. else {
  7540. /* some multicasts */
  7541. if (bnx2x_set_mc_list(bp))
  7542. rx_mode = BNX2X_RX_MODE_ALLMULTI;
  7543. /* some unicasts */
  7544. if (bnx2x_set_uc_list(bp))
  7545. rx_mode = BNX2X_RX_MODE_PROMISC;
  7546. }
  7547. bp->rx_mode = rx_mode;
  7548. bnx2x_set_storm_rx_mode(bp);
  7549. }
  7550. /* called with rtnl_lock */
  7551. static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
  7552. int devad, u16 addr)
  7553. {
  7554. struct bnx2x *bp = netdev_priv(netdev);
  7555. u16 value;
  7556. int rc;
  7557. DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
  7558. prtad, devad, addr);
  7559. /* The HW expects different devad if CL22 is used */
  7560. devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
  7561. bnx2x_acquire_phy_lock(bp);
  7562. rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
  7563. bnx2x_release_phy_lock(bp);
  7564. DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
  7565. if (!rc)
  7566. rc = value;
  7567. return rc;
  7568. }
  7569. /* called with rtnl_lock */
  7570. static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
  7571. u16 addr, u16 value)
  7572. {
  7573. struct bnx2x *bp = netdev_priv(netdev);
  7574. int rc;
  7575. DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
  7576. " value 0x%x\n", prtad, devad, addr, value);
  7577. /* The HW expects different devad if CL22 is used */
  7578. devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
  7579. bnx2x_acquire_phy_lock(bp);
  7580. rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
  7581. bnx2x_release_phy_lock(bp);
  7582. return rc;
  7583. }
  7584. /* called with rtnl_lock */
  7585. static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  7586. {
  7587. struct bnx2x *bp = netdev_priv(dev);
  7588. struct mii_ioctl_data *mdio = if_mii(ifr);
  7589. DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
  7590. mdio->phy_id, mdio->reg_num, mdio->val_in);
  7591. if (!netif_running(dev))
  7592. return -EAGAIN;
  7593. return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
  7594. }
  7595. #ifdef CONFIG_NET_POLL_CONTROLLER
  7596. static void poll_bnx2x(struct net_device *dev)
  7597. {
  7598. struct bnx2x *bp = netdev_priv(dev);
  7599. disable_irq(bp->pdev->irq);
  7600. bnx2x_interrupt(bp->pdev->irq, dev);
  7601. enable_irq(bp->pdev->irq);
  7602. }
  7603. #endif
  7604. static const struct net_device_ops bnx2x_netdev_ops = {
  7605. .ndo_open = bnx2x_open,
  7606. .ndo_stop = bnx2x_close,
  7607. .ndo_start_xmit = bnx2x_start_xmit,
  7608. .ndo_select_queue = bnx2x_select_queue,
  7609. .ndo_set_rx_mode = bnx2x_set_rx_mode,
  7610. .ndo_set_mac_address = bnx2x_change_mac_addr,
  7611. .ndo_validate_addr = eth_validate_addr,
  7612. .ndo_do_ioctl = bnx2x_ioctl,
  7613. .ndo_change_mtu = bnx2x_change_mtu,
  7614. .ndo_fix_features = bnx2x_fix_features,
  7615. .ndo_set_features = bnx2x_set_features,
  7616. .ndo_tx_timeout = bnx2x_tx_timeout,
  7617. #ifdef CONFIG_NET_POLL_CONTROLLER
  7618. .ndo_poll_controller = poll_bnx2x,
  7619. #endif
  7620. };
  7621. static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
  7622. struct net_device *dev)
  7623. {
  7624. struct bnx2x *bp;
  7625. int rc;
  7626. SET_NETDEV_DEV(dev, &pdev->dev);
  7627. bp = netdev_priv(dev);
  7628. bp->dev = dev;
  7629. bp->pdev = pdev;
  7630. bp->flags = 0;
  7631. bp->pf_num = PCI_FUNC(pdev->devfn);
  7632. rc = pci_enable_device(pdev);
  7633. if (rc) {
  7634. dev_err(&bp->pdev->dev,
  7635. "Cannot enable PCI device, aborting\n");
  7636. goto err_out;
  7637. }
  7638. if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
  7639. dev_err(&bp->pdev->dev,
  7640. "Cannot find PCI device base address, aborting\n");
  7641. rc = -ENODEV;
  7642. goto err_out_disable;
  7643. }
  7644. if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
  7645. dev_err(&bp->pdev->dev, "Cannot find second PCI device"
  7646. " base address, aborting\n");
  7647. rc = -ENODEV;
  7648. goto err_out_disable;
  7649. }
  7650. if (atomic_read(&pdev->enable_cnt) == 1) {
  7651. rc = pci_request_regions(pdev, DRV_MODULE_NAME);
  7652. if (rc) {
  7653. dev_err(&bp->pdev->dev,
  7654. "Cannot obtain PCI resources, aborting\n");
  7655. goto err_out_disable;
  7656. }
  7657. pci_set_master(pdev);
  7658. pci_save_state(pdev);
  7659. }
  7660. bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
  7661. if (bp->pm_cap == 0) {
  7662. dev_err(&bp->pdev->dev,
  7663. "Cannot find power management capability, aborting\n");
  7664. rc = -EIO;
  7665. goto err_out_release;
  7666. }
  7667. bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  7668. if (bp->pcie_cap == 0) {
  7669. dev_err(&bp->pdev->dev,
  7670. "Cannot find PCI Express capability, aborting\n");
  7671. rc = -EIO;
  7672. goto err_out_release;
  7673. }
  7674. if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
  7675. bp->flags |= USING_DAC_FLAG;
  7676. if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
  7677. dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
  7678. " failed, aborting\n");
  7679. rc = -EIO;
  7680. goto err_out_release;
  7681. }
  7682. } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
  7683. dev_err(&bp->pdev->dev,
  7684. "System does not support DMA, aborting\n");
  7685. rc = -EIO;
  7686. goto err_out_release;
  7687. }
  7688. dev->mem_start = pci_resource_start(pdev, 0);
  7689. dev->base_addr = dev->mem_start;
  7690. dev->mem_end = pci_resource_end(pdev, 0);
  7691. dev->irq = pdev->irq;
  7692. bp->regview = pci_ioremap_bar(pdev, 0);
  7693. if (!bp->regview) {
  7694. dev_err(&bp->pdev->dev,
  7695. "Cannot map register space, aborting\n");
  7696. rc = -ENOMEM;
  7697. goto err_out_release;
  7698. }
  7699. bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
  7700. min_t(u64, BNX2X_DB_SIZE(bp),
  7701. pci_resource_len(pdev, 2)));
  7702. if (!bp->doorbells) {
  7703. dev_err(&bp->pdev->dev,
  7704. "Cannot map doorbell space, aborting\n");
  7705. rc = -ENOMEM;
  7706. goto err_out_unmap;
  7707. }
  7708. bnx2x_set_power_state(bp, PCI_D0);
  7709. /* clean indirect addresses */
  7710. pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
  7711. PCICFG_VENDOR_ID_OFFSET);
  7712. REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
  7713. REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
  7714. REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
  7715. REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
  7716. /* Reset the load counter */
  7717. bnx2x_clear_load_cnt(bp);
  7718. dev->watchdog_timeo = TX_TIMEOUT;
  7719. dev->netdev_ops = &bnx2x_netdev_ops;
  7720. bnx2x_set_ethtool_ops(dev);
  7721. dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  7722. NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
  7723. NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
  7724. dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  7725. NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
  7726. dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
  7727. if (bp->flags & USING_DAC_FLAG)
  7728. dev->features |= NETIF_F_HIGHDMA;
  7729. /* Add Loopback capability to the device */
  7730. dev->hw_features |= NETIF_F_LOOPBACK;
  7731. #ifdef BCM_DCBNL
  7732. dev->dcbnl_ops = &bnx2x_dcbnl_ops;
  7733. #endif
  7734. /* get_port_hwinfo() will set prtad and mmds properly */
  7735. bp->mdio.prtad = MDIO_PRTAD_NONE;
  7736. bp->mdio.mmds = 0;
  7737. bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
  7738. bp->mdio.dev = dev;
  7739. bp->mdio.mdio_read = bnx2x_mdio_read;
  7740. bp->mdio.mdio_write = bnx2x_mdio_write;
  7741. return 0;
  7742. err_out_unmap:
  7743. if (bp->regview) {
  7744. iounmap(bp->regview);
  7745. bp->regview = NULL;
  7746. }
  7747. if (bp->doorbells) {
  7748. iounmap(bp->doorbells);
  7749. bp->doorbells = NULL;
  7750. }
  7751. err_out_release:
  7752. if (atomic_read(&pdev->enable_cnt) == 1)
  7753. pci_release_regions(pdev);
  7754. err_out_disable:
  7755. pci_disable_device(pdev);
  7756. pci_set_drvdata(pdev, NULL);
  7757. err_out:
  7758. return rc;
  7759. }
  7760. static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
  7761. int *width, int *speed)
  7762. {
  7763. u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
  7764. *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
  7765. /* return value of 1=2.5GHz 2=5GHz */
  7766. *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
  7767. }
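/* E.g. *width == 8 together with *speed == 1 describes a x8 link running
 * at 2.5GHz (Gen1); *speed == 2 would indicate 5GHz (Gen2). */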
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;

	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
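
/* Helper converters for the firmware file: the arrays are stored big-endian
 * on disk and are byte-swapped into host order before use.
 */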
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
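
/* Allocate a host copy of a firmware file section and convert it into the
 * driver's working format with 'func'; on allocation failure jump to 'lbl'
 * so previously allocated arrays can be freed. Expects 'bp' and 'fw_hdr' to
 * be in the caller's scope.
 */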
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
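
/* Request the chip-specific firmware file, validate it and build the
 * init_data/init_ops/init_ops_offsets/iro arrays plus the per-STORM
 * PRAM and interrupt-table pointers used during HW initialization.
 */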
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
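
/* Round the number of connection IDs (L2 plus, when CNIC is built in, the
 * CNIC CIDs) up to the QM granularity.
 */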
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}
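
/* PCI probe entry point: pick the CID count for the chip family, allocate
 * the net_device, set it up via bnx2x_init_dev(), initialize bp, configure
 * the interrupt mode, add the NAPI objects and register the netdev.
 */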
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
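
/* PCI remove entry point: undo bnx2x_init_one() - delete the FCoE storage
 * MAC (when CNIC is built in), unregister the netdev, drop NAPI objects and
 * MSI/MSI-X, power the device down and release the mapped BARs.
 */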
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
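
/* Minimal teardown used by the PCI error handler: stop the interface and
 * timers, release IRQs and free the rings and driver memory, skipping the
 * regular unload handshake since the device may be inaccessible.
 */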
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
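
/* Re-read the shared-memory base after a slot reset and recover the MCP
 * state (validity signature and firmware sequence number) needed before the
 * NIC can be reloaded.
 */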
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be no more than 8 L2 and no more than 8 L5 SPEs.
		 * We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
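
/* Entry point for CNIC to submit 16-byte KWQEs: queue them on the cnic_kwq
 * ring under the SPQ lock, then kick bnx2x_cnic_sp_post() to push whatever
 * the SPQ can currently accept.
 */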
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_mutex));
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}
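
/* Control hook exported to CNIC: handles context-table writes, SPQ/EQ credit
 * returns, starting/stopping the iSCSI L2 ring and the iSCSI-stopped DCBX
 * notification.
 */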
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for the UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets - the leading one
		 * in our case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	case DRV_CTL_ISCSI_STOPPED_CMD: {
		bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
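
/* Describe to CNIC which interrupt resources it should use: the second MSI-X
 * vector (when MSI-X is enabled) plus the CNIC and default status blocks.
 */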
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}
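
/* Called by CNIC to attach to this device: allocate the KWQE ring, publish
 * the IRQ/status-block info and install the cnic_ops pointer.
 */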
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
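
/* Called by CNIC to detach: clear the ops pointer under the mutex, wait for
 * RCU readers to finish and free the KWQE ring.
 */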
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
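
/* Fill and return the cnic_eth_dev structure the CNIC module uses to drive
 * iSCSI/FCoE offload on this device, or NULL when both are disabled.
 */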
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate to CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */