/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan (mchan@broadcom.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/aer.h>

#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "cnic_if.h"
#endif
#include "bnx2.h"
#include "bnx2_fw.h"

#define DRV_MODULE_NAME		"bnx2"
#define DRV_MODULE_VERSION	"2.1.6"
#define DRV_MODULE_RELDATE	"Mar 7, 2011"
#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.1.fw"
#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1a.fw"
#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
};

static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};

static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);
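
/* Free TX descriptors remaining in the ring. tx_prod and tx_cons are
 * free-running 16-bit indices, so (tx_prod - tx_cons) counts the BDs
 * currently in flight. Because 256 indices cover only 255 usable
 * entries (one index per page must be skipped, per the comment in the
 * function), a full ring can make the raw difference read as
 * TX_DESC_CNT; that case is clamped to MAX_TX_DESC_CNT. For example,
 * with tx_ring_size of 255, tx_prod = 300 and tx_cons = 50 give
 * diff = 250, leaving 5 free descriptors.
 */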
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
	barrier();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return bp->tx_ring_size - diff;
}

static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}

static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
}
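
/* Write one 32-bit word of on-chip context memory for a given CID. On
 * the 5709 the write is posted through BNX2_CTX_CTX_DATA/_CTRL and the
 * chip clears the WRITE_REQ bit on completion, so completion is polled
 * for (up to 5 passes of 5us below); older chips latch the address and
 * data directly. indirect_lock serializes both paths.
 */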
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

#ifdef BCM_CNIC
static int
bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct drv_ctl_io *io = &info->data.io;

	switch (info->cmd) {
	case DRV_CTL_IO_WR_CMD:
		bnx2_reg_wr_ind(bp, io->offset, io->data);
		break;
	case DRV_CTL_IO_RD_CMD:
		io->data = bnx2_reg_rd_ind(bp, io->offset);
		break;
	case DRV_CTL_CTX_WR_CMD:
		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}

static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}

static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}

struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (!cp->max_iscsi_conn)
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;
	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = rcu_dereference_protected(bp->cnic_ops,
					  lockdep_is_held(&bp->cnic_lock));
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}

#else

static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}

#endif
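
/* PHY register access goes through the EMAC MDIO_COMM register: the
 * PHY address is placed in bits 25:21 and the register number in bits
 * 20:16, with START_BUSY kicking off the transaction. Completion is
 * polled for (up to 50 passes of 10us), and if the PHY is in hardware
 * auto-polling mode that mode is suspended around the access so the
 * two users do not collide on the MDIO bus.
 */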
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	} else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
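
/* Re-arm interrupts on each vector with two INT_ACK_CMD writes: the
 * first acknowledges events up to last_status_idx with the interrupt
 * still masked, the second unmasks it. The final COAL_NOW command asks
 * the host coalescing block to generate an interrupt right away if
 * anything is already pending.
 */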
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}

static void
bnx2_napi_disable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_disable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_napi_enable(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		napi_enable(&bp->bnx2_napi[i].napi);
}

static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
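
/* Counterpart to bnx2_netif_stop(): bnx2_disable_int_sync() bumped
 * intr_sem, so the device is only re-armed when the last stopper drops
 * the count back to zero, keeping nested stop/start sequences
 * balanced.
 */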
  575. static void
  576. bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
  577. {
  578. if (atomic_dec_and_test(&bp->intr_sem)) {
  579. if (netif_running(bp->dev)) {
  580. netif_tx_wake_all_queues(bp->dev);
  581. spin_lock_bh(&bp->phy_lock);
  582. if (bp->link_up)
  583. netif_carrier_on(bp->dev);
  584. spin_unlock_bh(&bp->phy_lock);
  585. bnx2_napi_enable(bp);
  586. bnx2_enable_int(bp);
  587. if (start_cnic)
  588. bnx2_cnic_start(bp);
  589. }
  590. }
  591. }
  592. static void
  593. bnx2_free_tx_mem(struct bnx2 *bp)
  594. {
  595. int i;
  596. for (i = 0; i < bp->num_tx_rings; i++) {
  597. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  598. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  599. if (txr->tx_desc_ring) {
  600. dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
  601. txr->tx_desc_ring,
  602. txr->tx_desc_mapping);
  603. txr->tx_desc_ring = NULL;
  604. }
  605. kfree(txr->tx_buf_ring);
  606. txr->tx_buf_ring = NULL;
  607. }
  608. }
  609. static void
  610. bnx2_free_rx_mem(struct bnx2 *bp)
  611. {
  612. int i;
  613. for (i = 0; i < bp->num_rx_rings; i++) {
  614. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  615. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  616. int j;
  617. for (j = 0; j < bp->rx_max_ring; j++) {
  618. if (rxr->rx_desc_ring[j])
  619. dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
  620. rxr->rx_desc_ring[j],
  621. rxr->rx_desc_mapping[j]);
  622. rxr->rx_desc_ring[j] = NULL;
  623. }
  624. vfree(rxr->rx_buf_ring);
  625. rxr->rx_buf_ring = NULL;
  626. for (j = 0; j < bp->rx_max_pg_ring; j++) {
  627. if (rxr->rx_pg_desc_ring[j])
  628. dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
  629. rxr->rx_pg_desc_ring[j],
  630. rxr->rx_pg_desc_mapping[j]);
  631. rxr->rx_pg_desc_ring[j] = NULL;
  632. }
  633. vfree(rxr->rx_pg_ring);
  634. rxr->rx_pg_ring = NULL;
  635. }
  636. }

static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
					   &txr->tx_desc_mapping, GFP_KERNEL);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}

static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;
		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				dma_alloc_coherent(&bp->pdev->dev,
						   RXBD_RING_SIZE,
						   &rxr->rx_pg_desc_mapping[j],
						   GFP_KERNEL);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
					  bp->ctx_blk[i],
					  bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
				  bnapi->status_blk.msi,
				  bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
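
/* Allocate the shared DMA memory: one coherent block holding the status
 * block(s) followed by the statistics block, plus the 5709 context pages.
 * With MSI-X, one BNX2_SBLK_MSIX_ALIGN_SIZE-aligned status block is laid
 * out per hardware vector inside the same allocation.
 */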
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
					&bp->status_blk_mapping, GFP_KERNEL);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < bp->irq_nvecs; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	bp->stats_blk = status_blk + status_blk_size;
	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i],
						GFP_KERNEL);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
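
/* Mirror the driver's resolved link state (speed, duplex, autoneg
 * progress) into the BNX2_LINK_STATUS word in shared memory so the
 * firmware sees the same state the driver does.
 */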
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches link-down events; read it twice to
			 * get the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}

static char *
bnx2_xceiver_str(struct bnx2 *bp)
{
	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
		 "Copper");
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
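
/* Resolve TX/RX pause from the local and remote advertisements, per
 * Table 28B-3 of IEEE 802.3ab-1999.  The cases the code below implements:
 *
 *   local PAUSE_CAP,        remote PAUSE_CAP        -> TX | RX
 *   local PAUSE_CAP + ASYM, remote ASYM only        -> RX
 *   local ASYM only,        remote PAUSE_CAP + ASYM -> TX
 *
 * 1000BASE-X advertisement bits are first translated to their
 * copper-style PAUSE_CAP/PAUSE_ASYM equivalents.
 */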
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {
			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}

static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}

static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {

		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	bnx2_init_all_rx_contexts(bp);
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
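
/* Advertise or un-advertise 2.5G on the SerDes PHY.  The "test and"
 * return values matter to the caller: bnx2_test_and_enable_2g5()
 * returns nonzero if 2.5G was already advertised (nothing changed),
 * while bnx2_test_and_disable_2g5() returns nonzero if it had to clear
 * the bit.  The serdes setup path uses a change of either kind as a
 * reason to force the link down and renegotiate.
 */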
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
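
/* Re-evaluate the physical link: read the (latched) BMSR twice, run the
 * chip-specific linkup helper to extract speed/duplex, resolve flow
 * control, and report any state change.  Called under phy_lock (see
 * bnx2_phy_int() below).
 */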
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
	    (FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
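
/* With a remote PHY (managed by the bootcode), link settings are not
 * programmed through MII registers.  The requested speed, duplex and
 * pause bits are encoded into a BNX2_NETLINK_SET_LINK_* argument word
 * and handed to the firmware through the driver mailbox instead.
 */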
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}

static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return bnx2_setup_remote_phy(bp, port);

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}

static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}

static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
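
/* Decode a link-status word posted by the firmware for the remote PHY
 * and fold it into the driver state (speed, duplex, flow control,
 * copper vs. fibre port), then report and apply the result.
 */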
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);
	}

	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}

static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
	case BNX2_FW_EVT_CODE_LINK_EVENT:
		bnx2_remote_phy_event(bp);
		break;
	case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
	default:
		bnx2_send_heart_beat(bp);
		break;
	}
	return 0;
}
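
/* Program the copper PHY: either restart autonegotiation with the
 * requested advertisement masks, or force speed/duplex through BMCR.
 * When forcing a new BMCR value, the link is first dropped via
 * BMCR_LOOPBACK so the partner notices the change.
 */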
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
		    (adv_reg != new_adv_reg) ||
		    ((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		return bnx2_setup_serdes_phy(bp, port);
	}
	else {
		return bnx2_setup_copper_phy(bp);
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}

static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
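
/* Driver/firmware handshake.  A sequence number (bp->fw_wr_seq) is
 * embedded in each message written to BNX2_DRV_MB; the firmware echoes
 * it in BNX2_FW_MB when done.  If no ack arrives within
 * BNX2_FW_ACK_TIME_OUT_MS, the firmware is told about the timeout and
 * -EBUSY is returned.  Sleeps, so callers must not hold spinlocks.
 */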
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
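
/* The 5709 keeps its context memory in host DRAM.  Kick the one-time
 * MEM_INIT, then write each context page's DMA address into the chip's
 * host page table, polling for the WRITE_REQ bit to clear after each
 * entry.
 */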
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
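
/* Work around bad on-chip RX buffer memory: allocate every mbuf the
 * RBUF interface will hand out, remember the ones whose address has
 * bit 9 clear, and free only those back, so the known-bad blocks stay
 * permanently allocated and out of circulation.
 */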
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}

static void
bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
{
	u32 val;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
}

static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;
	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}

static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
		       PAGE_SIZE, PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}

static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}

static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);
}
  2310. static inline u16
  2311. bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
  2312. {
  2313. u16 cons;
  2314. /* Tell compiler that status block fields can change. */
  2315. barrier();
  2316. cons = *bnapi->hw_tx_cons_ptr;
  2317. barrier();
  2318. if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
  2319. cons++;
  2320. return cons;
  2321. }
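/*
 * Why the cons++ above: the last entry of each descriptor-ring page is a
 * pointer to the next page rather than a real buffer descriptor, so a
 * hardware index whose low bits are all set refers to no real buffer and
 * is bumped past.  E.g. assuming MAX_TX_DESC_CNT == 255 (illustrative), a
 * raw index of 255 is normalized to 256, the first usable slot of the
 * next page.
 */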
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        struct netdev_queue *txq;

        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
                prefetch(&skb->end);

                /* partial BD completions possible with TSO packets */
                if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons + tx_buf->nr_frags + 1;
                        last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }
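                /*
                 * The (s16) casts above make the index comparison
                 * wrap-safe: e.g. last_idx == 0x0002 and hw_cons == 0xfffe
                 * give (s16) 0x0004 > 0, so a TSO packet whose BDs are only
                 * partially completed is deferred even after the 16-bit
                 * counters have wrapped.
                 */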
                dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;

                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        dma_unmap_page(&bp->pdev->dev,
                                dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                if (hw_cons == sw_cons)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        int i;
        u16 hw_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        cons_rx_pg = &rxr->rx_pg_ring[cons];

        /* The caller was unable to allocate a new page to replace the
         * last one in the frags array, so we need to recycle that page
         * and then free the skb.
         */
        if (skb) {
                struct page *page;
                struct skb_shared_info *shinfo;

                shinfo = skb_shinfo(skb);
                shinfo->nr_frags--;
                page = shinfo->frags[shinfo->nr_frags].page;
                shinfo->frags[shinfo->nr_frags].page = NULL;

                cons_rx_pg->page = page;
                dev_kfree_skb(skb);
        }

        hw_prod = rxr->rx_pg_prod;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        dma_unmap_addr_set(prod_rx_pg, mapping,
                                dma_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
                }

                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;
        prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

        if (cons == prod)
                return;

        dma_unmap_addr_set(prod_rx_buf, mapping,
                        dma_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        dma_addr_t mapping_old;

                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
                        mapping_old = dma_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod),
                                                 GFP_ATOMIC);
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        dma_unmap_page(&bp->pdev->dev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
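/*
 * Note on the "+ 4" and "frag_len -= 4" arithmetic above: the hardware
 * leaves the 4-byte Ethernet FCS on the end of the received frame, so the
 * last page fragment is shortened by 4 bytes, and the frag_len <= 4 path
 * handles an FCS that straddles a page boundary by trimming the tail from
 * the previous fragment (or from the linear area when i == 0).
 */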
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_rx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
}
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;
                prefetchw(skb);

                next_rx_buf =
                        &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
                prefetch(next_rx_buf->desc);

                rx_buf->skb = NULL;

                dma_addr = dma_unmap_addr(rx_buf, mapping);

                dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                rx_hdr = rx_buf->desc;
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;

                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
                                       L2_FHDR_ERRORS_PHY_DECODE |
                                       L2_FHDR_ERRORS_ALIGNMENT |
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {

                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;

                                pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                        }
                        goto next_rx;
                }

                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 6);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                BNX2_RX_OFFSET - 6,
                                new_skb->data, len + 6);
                        skb_reserve(new_skb, 6);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);

                skb->protocol = eth_type_trans(skb, bp->dev);

                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                    (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                skb_checksum_none_assert(skb);
                if ((bp->dev->features & NETIF_F_RXCSUM) &&
                    (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                               L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                if ((bp->dev->features & NETIF_F_RXHASH) &&
                    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
                     L2_FHDR_STATUS_USE_RXHASH))
                        skb->rxhash = rx_hdr->l2_fhdr_hash;

                skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
                napi_gro_receive(&bnapi->napi, skb);
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if (rx_pkt == budget)
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;
}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}

static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt.  Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        if (napi_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
static inline int
bnx2_has_fast_work(struct bnx2_napi *bnapi)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
            (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
                return 1;
        return 0;
}

#define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
                                 STATUS_ATTN_BITS_TIMER_ABORT)

static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;

        if (bnx2_has_fast_work(bnapi))
                return 1;

#ifdef BCM_CNIC
        if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
                return 1;
#endif

        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
                return 1;

        return 0;
}
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
#ifdef BCM_CNIC
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
                          int work_done, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
                bnx2_tx_int(bp, bnapi, 0);

        if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
                work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

        return work_done;
}
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        napi_complete(napi);
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
        if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
            (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                netdev_for_each_mc_addr(ha, dev) {
                        crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }
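                /*
                 * Sketch of the hash math above: the low 8 CRC bits pick
                 * one of 256 bucket bits spread across the hash registers.
                 * E.g. crc & 0xff == 0x53 gives regidx == 2 and bit == 19,
                 * i.e. bit 19 of BNX2_EMAC_MULTICAST_HASH2 is set.
                 */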
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into the match filter list */
                i = 0;
                netdev_for_each_uc_addr(ha, dev) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }
        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
static int __devinit
check_fw_section(const struct firmware *fw,
                 const struct bnx2_fw_file_section *section,
                 u32 alignment, bool non_empty)
{
        u32 offset = be32_to_cpu(section->offset);
        u32 len = be32_to_cpu(section->len);

        if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
                return -EINVAL;
        if ((non_empty && len == 0) || len > fw->size - offset ||
            len & (alignment - 1))
                return -EINVAL;
        return 0;
}

static int __devinit
check_mips_fw_entry(const struct firmware *fw,
                    const struct bnx2_mips_fw_file_entry *entry)
{
        if (check_fw_section(fw, &entry->text, 4, true) ||
            check_fw_section(fw, &entry->data, 4, false) ||
            check_fw_section(fw, &entry->rodata, 4, false))
                return -EINVAL;
        return 0;
}
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
        const char *mips_fw_file, *rv2p_fw_file;
        const struct bnx2_mips_fw_file *mips_fw;
        const struct bnx2_rv2p_fw_file *rv2p_fw;
        int rc;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
                if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5709_A1))
                        rv2p_fw_file = FW_RV2P_FILE_09_Ax;
                else
                        rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
        }

        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
                return rc;
        }

        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
                return rc;
        }
        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
                pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
                return -EINVAL;
        }
        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
                pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
                return -EINVAL;
        }

        return 0;
}
static u32
rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
{
        switch (idx) {
        case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
                rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
                rv2p_code |= RV2P_BD_PAGE_SIZE;
                break;
        }
        return rv2p_code;
}
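/*
 * The fixup table lets a single RV2P firmware image serve hosts with
 * different page sizes: for the PAGE_SIZE fixup index, the BD page size
 * field of the affected instruction is patched at load time rather than
 * shipping one firmware image per page size.
 */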
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
static int
load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
            const struct bnx2_mips_fw_file_entry *fw_entry)
{
        u32 addr, len, file_offset;
        __be32 *data;
        u32 offset;
        u32 val;

        /* Halt the CPU. */
        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
        val |= cpu_reg->mode_value_halt;
        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

        /* Load the Text area. */
        addr = be32_to_cpu(fw_entry->text.addr);
        len = be32_to_cpu(fw_entry->text.len);
        file_offset = be32_to_cpu(fw_entry->text.offset);
        data = (__be32 *)(bp->mips_firmware->data + file_offset);

        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
        if (len) {
                int j;

                for (j = 0; j < (len / 4); j++, offset += 4)
                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
        }

        /* Load the Data area. */
        addr = be32_to_cpu(fw_entry->data.addr);
        len = be32_to_cpu(fw_entry->data.len);
        file_offset = be32_to_cpu(fw_entry->data.offset);
        data = (__be32 *)(bp->mips_firmware->data + file_offset);

        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
        if (len) {
                int j;

                for (j = 0; j < (len / 4); j++, offset += 4)
                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
        }

        /* Load the Read-Only area. */
        addr = be32_to_cpu(fw_entry->rodata.addr);
        len = be32_to_cpu(fw_entry->rodata.len);
        file_offset = be32_to_cpu(fw_entry->rodata.offset);
        data = (__be32 *)(bp->mips_firmware->data + file_offset);

        offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
        if (len) {
                int j;

                for (j = 0; j < (len / 4); j++, offset += 4)
                        bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
        }

        /* Clear the pre-fetch instruction. */
        bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);

        val = be32_to_cpu(fw_entry->start_addr);
        bnx2_reg_wr_ind(bp, cpu_reg->pc, val);

        /* Start the CPU. */
        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
        val &= ~cpu_reg->mode_value_halt;
        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);

        return 0;
}
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        const struct bnx2_mips_fw_file *mips_fw =
                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        const struct bnx2_rv2p_fw_file *rv2p_fw =
                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        int rc;

        /* Initialize the RV2P processor. */
        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

        /* Initialize the RX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
        return rc;
}
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
        u32 val;
        int j;

        /* Request access to the flash interface. */
        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                val = REG_RD(bp, BNX2_NVM_SW_ARB);
                if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
                        break;

                udelay(5);
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}

static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
        int j;
        u32 val;

        /* Relinquish nvram interface. */
        REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                val = REG_RD(bp, BNX2_NVM_SW_ARB);
                if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
                        break;

                udelay(5);
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

        if (bp->flash_info->flags & BNX2_NV_WREN) {
                int j;

                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
                REG_WR(bp, BNX2_NVM_COMMAND,
                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                        udelay(5);

                        val = REG_RD(bp, BNX2_NVM_COMMAND);
                        if (val & BNX2_NVM_COMMAND_DONE)
                                break;
                }

                if (j >= NVRAM_TIMEOUT_COUNT)
                        return -EBUSY;
        }
        return 0;
}

static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}

static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Enable both bits, even on read. */
        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
               val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
}

static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
        /* Disable both bits, even after read. */
        REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
                val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
                        BNX2_NVM_ACCESS_ENABLE_WR_EN));
}
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM page to erase. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate the offset in a buffered flash; not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
                        memcpy(ret_val, &v, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
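/*
 * Worked example of the buffered-flash translation above (the parameter
 * values are illustrative, not tied to a particular flash_table entry):
 * with page_size == 264 and page_bits == 9, byte offset 600 maps to
 * (600 / 264) << 9 | (600 % 264), i.e. page 2 at in-page offset 72, which
 * is device address 0x448.
 */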
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
        u32 cmd;
        __be32 val32;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

        /* Calculate the offset in a buffered flash; not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                          bp->flash_info->page_bits) +
                         (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        memcpy(&val32, val, 4);

        /* Write the data. */
        REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

        /* Address of the NVRAM to write to. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue the write command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                udelay(5);

                if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
                        break;
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc = 0;
        const struct flash_spec *flash;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->flash_info = &flash_5709;
                goto get_flash_size;
        }

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = ARRAY_SIZE(flash_table);

        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        if (j == entry_count) {
                bp->flash_info = NULL;
                pr_alert("Unknown flash/EEPROM type\n");
                return -ENODEV;
        }

get_flash_size:
        val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
                int buf_size)
{
        int rc = 0;
        u32 cmd_flags, offset32, len32, extra;

        if (buf_size == 0)
                return 0;

        /* Request access to the flash interface. */
        if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                return rc;

        /* Enable access to flash interface */
        bnx2_enable_nvram_access(bp);

        len32 = buf_size;
        offset32 = offset;
        extra = 0;

        cmd_flags = 0;

        if (offset32 & 3) {
                u8 buf[4];
                u32 pre_len;

                offset32 &= ~3;
                pre_len = 4 - (offset & 3);

                if (pre_len >= len32) {
                        pre_len = len32;
                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
                                    BNX2_NVM_COMMAND_LAST;
                }
                else {
                        cmd_flags = BNX2_NVM_COMMAND_FIRST;
                }

                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                if (rc)
                        return rc;

                memcpy(ret_buf, buf + (offset & 3), pre_len);

                offset32 += 4;
                ret_buf += pre_len;
                len32 -= pre_len;
        }
        if (len32 & 3) {
                extra = 4 - (len32 & 3);
                len32 = (len32 + 4) & ~3;
        }

        if (len32 == 4) {
                u8 buf[4];

                if (cmd_flags)
                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                else
                        cmd_flags = BNX2_NVM_COMMAND_FIRST |
                                    BNX2_NVM_COMMAND_LAST;

                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                memcpy(ret_buf, buf, 4 - extra);
        }
        else if (len32 > 0) {
                u8 buf[4];

                /* Read the first word. */
                if (cmd_flags)
                        cmd_flags = 0;
                else
                        cmd_flags = BNX2_NVM_COMMAND_FIRST;

                rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

                /* Advance to the next dword. */
                offset32 += 4;
                ret_buf += 4;
                len32 -= 4;

                while (len32 > 4 && rc == 0) {
                        rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

                        /* Advance to the next dword. */
                        offset32 += 4;
                        ret_buf += 4;
                        len32 -= 4;
                }

                if (rc)
                        return rc;

                cmd_flags = BNX2_NVM_COMMAND_LAST;
                rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

                memcpy(ret_buf, buf, 4 - extra);
        }

        /* Disable access to flash interface */
        bnx2_disable_nvram_access(bp);

        bnx2_release_nvram_lock(bp);

        return rc;
}
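/*
 * Summary of the alignment handling above: the NVRAM interface transfers
 * only aligned 32-bit words, so an arbitrary (offset, buf_size) request is
 * served as an optional leading partial word bounced through a 4-byte
 * scratch buffer, a run of whole words read straight into the caller's
 * buffer, and an optional trailing partial word handled the same way as
 * the head.
 */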
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
                 int buf_size)
{
        u32 written, offset32, len32;
        u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
        int rc = 0;
        int align_start, align_end;

        buf = data_buf;
        offset32 = offset;
        len32 = buf_size;
        align_start = align_end = 0;

        if ((align_start = (offset32 & 3))) {
                offset32 &= ~3;
                len32 += align_start;
                if (len32 < 4)
                        len32 = 4;
                if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
                        return rc;
        }

        if (len32 & 3) {
                align_end = 4 - (len32 & 3);
                len32 += align_end;
                if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
                        return rc;
        }

        if (align_start || align_end) {
                align_buf = kmalloc(len32, GFP_KERNEL);
                if (align_buf == NULL)
                        return -ENOMEM;
                if (align_start) {
                        memcpy(align_buf, start, 4);
                }
                if (align_end) {
                        memcpy(align_buf + len32 - 4, end, 4);
                }
                memcpy(align_buf + align_start, data_buf, buf_size);
                buf = align_buf;
        }

        if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                flash_buffer = kmalloc(264, GFP_KERNEL);
                if (flash_buffer == NULL) {
                        rc = -ENOMEM;
                        goto nvram_write_end;
                }
        }

        written = 0;
        while ((written < len32) && (rc == 0)) {
                u32 page_start, page_end, data_start, data_end;
                u32 addr, cmd_flags;
                int i;

                /* Find the page_start addr */
                page_start = offset32 + written;
                page_start -= (page_start % bp->flash_info->page_size);
                /* Find the page_end addr */
                page_end = page_start + bp->flash_info->page_size;
                /* Find the data_start addr */
                data_start = (written == 0) ? offset32 : page_start;
                /* Find the data_end addr */
                data_end = (page_end > offset32 + len32) ?
                        (offset32 + len32) : page_end;

                /* Request access to the flash interface. */
                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                        goto nvram_write_end;

                /* Enable access to flash interface */
                bnx2_enable_nvram_access(bp);

                cmd_flags = BNX2_NVM_COMMAND_FIRST;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        int j;

                        /* Read the whole page into the buffer
                         * (non-buffer flash only) */
                        for (j = 0; j < bp->flash_info->page_size; j += 4) {
                                if (j == (bp->flash_info->page_size - 4)) {
                                        cmd_flags |= BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_read_dword(bp,
                                        page_start + j,
                                        &flash_buffer[j],
                                        cmd_flags);

                                if (rc)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Enable writes to flash interface (unlock write-protect) */
                if ((rc = bnx2_enable_nvram_write(bp)) != 0)
                        goto nvram_write_end;

                /* Loop to write back the buffer data from page_start to
                 * data_start */
                i = 0;
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        /* Erase the page */
                        if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
                                goto nvram_write_end;

                        /* Re-enable the write again for the actual write */
                        bnx2_enable_nvram_write(bp);

                        for (addr = page_start; addr < data_start;
                             addr += 4, i += 4) {

                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Loop to write the new data from data_start to data_end */
                for (addr = data_start; addr < data_end; addr += 4, i += 4) {
                        if ((addr == page_end - 4) ||
                            ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
                             (addr == data_end - 4))) {

                                cmd_flags |= BNX2_NVM_COMMAND_LAST;
                        }
                        rc = bnx2_nvram_write_dword(bp, addr, buf,
                                cmd_flags);

                        if (rc != 0)
                                goto nvram_write_end;

                        cmd_flags = 0;
                        buf += 4;
                }

                /* Loop to write back the buffer data from data_end
                 * to page_end */
                if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
                        for (addr = data_end; addr < page_end;
                             addr += 4, i += 4) {

                                if (addr == page_end - 4) {
                                        cmd_flags = BNX2_NVM_COMMAND_LAST;
                                }
                                rc = bnx2_nvram_write_dword(bp, addr,
                                        &flash_buffer[i], cmd_flags);

                                if (rc != 0)
                                        goto nvram_write_end;

                                cmd_flags = 0;
                        }
                }

                /* Disable writes to flash interface (lock write-protect) */
                bnx2_disable_nvram_write(bp);

                /* Disable access to flash interface */
                bnx2_disable_nvram_access(bp);
                bnx2_release_nvram_lock(bp);

                /* Increment written */
                written += data_end - data_start;
        }

nvram_write_end:
        kfree(flash_buffer);
        kfree(align_buf);
        return rc;
}
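/*
 * The write path above is a read-modify-write cycle over whole flash
 * pages for non-buffered parts: read the page into flash_buffer, erase
 * it, rewrite the leading unmodified words, then the new data, then the
 * trailing unmodified words, re-acquiring the NVRAM lock for each page.
 * Buffered parts skip the read-back and erase entirely.
 */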
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
        u32 val, sig = 0;

        bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
        bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

        if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
                bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

        val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
        if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
                return;

        if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
                bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
                sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
        }

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
                u32 link;

                bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

                link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
                if (link & BNX2_LINK_STATUS_SERDES_LINK)
                        bp->phy_port = PORT_FIBRE;
                else
                        bp->phy_port = PORT_TP;

                sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
                       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
        }

        if (netif_running(bp->dev) && sig)
                bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_NUM(bp) == CHIP_NUM_5708)) {
                REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
                       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
                       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
                val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
                udelay(5);
        } else {  /* 5709 */
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
                val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
                REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
                val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);

                for (i = 0; i < 100; i++) {
                        msleep(1);
                        val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
                        if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
                                break;
                }
        }

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current
         * transactions before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximately 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        pr_err("Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                pr_err("Chip not in correct endian mode\n");
                return -ENODEV;
        }
  3889. /* Wait for the firmware to finish its initialization. */
  3890. rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
  3891. if (rc)
  3892. return rc;
  3893. spin_lock_bh(&bp->phy_lock);
  3894. old_port = bp->phy_port;
  3895. bnx2_init_fw_cap(bp);
  3896. if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
  3897. old_port != bp->phy_port)
  3898. bnx2_set_default_remote_link(bp);
  3899. spin_unlock_bh(&bp->phy_lock);
  3900. if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
  3901. /* Adjust the voltage regular to two steps lower. The default
  3902. * of this register is 0x0000000e. */
  3903. REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
  3904. /* Remove bad rbuf memory from the free pool. */
  3905. rc = bnx2_alloc_bad_rbuf(bp);
  3906. }
  3907. if (bp->flags & BNX2_FLAG_USING_MSIX) {
  3908. bnx2_setup_msix_tbl(bp);
  3909. /* Prevent MSIX table reads and write from timing out */
  3910. REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
  3911. BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
  3912. }
  3913. return rc;
  3914. }
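
/* One-time chip setup after reset: DMA byte/word swapping, internal
 * CPUs and contexts, MTU and rx buffer sizing, status/statistics block
 * DMA addresses, and host coalescing parameters for every vector.
 */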
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	if (bp->rx_ticks < 25)
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
	else
		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);

	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
		       BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
		       (bp->tx_quick_cons_trip_int << 16) |
		       bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
		       (bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
		       bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}

static void
bnx2_clear_ring_states(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct bnx2_rx_ring_info *rxr;
	int i;

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bnapi = &bp->bnx2_napi[i];
		txr = &bnapi->tx_ring;
		rxr = &bnapi->rx_ring;

		txr->tx_cons = 0;
		txr->hw_tx_cons = 0;
		rxr->rx_prod_bseq = 0;
		rxr->rx_prod = 0;
		rxr->rx_cons = 0;
		rxr->rx_pg_prod = 0;
		rxr->rx_pg_cons = 0;
	}
}
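
/* Program the L2 TX context for one ring: BD type/size, command type,
 * and the 64-bit host address of the BD chain.  The 5709 (Xinan) parts
 * use a different set of context offsets than the 5706/5708.
 */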
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}

static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
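
/* Fill one or more pages of RX BDs.  The BD past the last used entry in
 * each page acts as a link pointing at the next page's DMA address; the
 * last page links back to the first, making the chain circular.
 */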
static void
bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
		     int num_rings)
{
	int i;
	struct rx_bd *rxbd;

	for (i = 0; i < num_rings; i++) {
		int j;

		rxbd = &rx_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = buf_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (num_rings - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
	}
}
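
/* Bring up one RX ring: write the ring context, optionally set up the
 * page ring used for jumbo packet split, pre-fill both rings with
 * receive buffers, and publish the initial producer indices to the
 * chip's mailbox registers.
 */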
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}

static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32 = 0;

		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			int shift = (i % 8) << 2;

			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
			if ((i % 8) == 7) {
				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
				       BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
				       BNX2_RLUP_RSS_COMMAND_WRITE |
				       BNX2_RLUP_RSS_COMMAND_HASH_MASK);
				tbl_32 = 0;
			}
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
	}
}
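
/* Convert a requested ring size into a ring page count rounded up to a
 * power of two and capped at max_size (the callers pass MAX_RX_RINGS or
 * MAX_RX_PG_RINGS); ring page counts are presumably kept power-of-two
 * so that index masking works.
 */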
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 max, num_rings = 1;

	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = max_size;
	while ((max & num_rings) == 0)
		max >>= 1;

	if (num_rings != max)
		max <<= 1;

	return max;
}

static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		   sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}

static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				dma_unmap_page(&bp->pdev->dev,
					       dma_unmap_addr(tx_buf, mapping),
					       skb_shinfo(skb)->frags[k].size,
					       PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}

static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}

static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}

static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}

static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
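
/* Choose the unload reset code reported to the bootcode from the
 * Wake-on-LAN settings and reset the chip with it.
 */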
static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 reset_code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, reset_code);
}
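
/* Register self-test.  For each table entry, rw_mask marks bits that
 * must be writable (checked by writing 0 then all ones) and ro_mask
 * marks bits that must hold their original value; the saved value is
 * restored whether the entry passes or fails.
 */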
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}

static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa, 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);

			if (bnx2_reg_rd_ind(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}

static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32   offset;
		u32   len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
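
/* Loopback self-test: send one self-addressed packet through the MAC or
 * PHY loopback path and verify it arrives back on the RX ring with no
 * l2_fhdr error bits, the expected length, and an unmodified payload
 * pattern.
 */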
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	dma_sync_single_for_cpu(&bp->pdev->dev,
				dma_unmap_addr(rx_buf, mapping),
				bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		 L2_FHDR_ERRORS_PHY_DECODE |
		 L2_FHDR_ERRORS_ALIGNMENT |
		 L2_FHDR_ERRORS_TOO_SHORT |
		 L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}

#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
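
/* NVRAM self-test: verify the 0x669955aa magic at offset 0, then check
 * that each of the two 0x100-byte blocks starting at offset 0x100
 * (data plus stored CRC32) computes to the standard CRC32 residual.
 */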
#define NVRAM_SIZE		0x200
#define CRC32_RESIDUAL		0xdebb20e3

static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}

static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}

static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}

/* Determine link state for parallel detection. */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}

static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}

static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}

static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* work around occasionally corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
			    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}

static void
__bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
}

static void
bnx2_free_irq(struct bnx2 *bp)
{
	__bnx2_free_irq(bp);
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
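
/* Enable MSI-X.  With the old pci_enable_msix() interface a positive
 * return value is the number of vectors actually available, so the loop
 * retries with that count until it succeeds (0) or fails (<0).  One
 * extra vector is reserved for CNIC when BCM_CNIC is built in.
 */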
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, total_vecs, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is set up properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	total_vecs = msix_vecs;
#ifdef BCM_CNIC
	total_vecs++;
#endif
	rc = -ENOSPC;
	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
		if (rc <= 0)
			break;
		if (rc > 0)
			total_vecs = rc;
	}

	if (rc != 0)
		return;

	msix_vecs = total_vecs;
#ifdef BCM_CNIC
	msix_vecs--;
#endif
	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < total_vecs; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}

static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);

	bp->num_rx_rings = bp->irq_nvecs;
	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
}

/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_setup_int_mode(bp, disable_msi);
	if (rc)
		goto open_err;
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	return rc;
}

static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}

static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;
	u32 mcp_p0, mcp_p1, val1, val2;

	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
		   atomic_read(&bp->intr_sem), val1);
	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mcp_p0 = BNX2_MCP_STATE_P0;
		mcp_p1 = BNX2_MCP_STATE_P1;
	} else {
		mcp_p0 = BNX2_MCP_STATE_P0_5708;
		mcp_p1 = BNX2_MCP_STATE_P1_5708;
	}
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}

static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shut down gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}

	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
				       len, PCI_DMA_TODEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnx2_tx_avail() below, because in
		 * bnx2_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;

dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bnx2_del_napi(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
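
/* Fold the live hardware statistics block into temp_stats_blk so the
 * totals survive counter-clearing events such as a chip reset.  The
 * first ten counters are 64-bit hi/lo pairs, so the low words are
 * summed with an explicit carry into the high words.
 */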
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}

#define GET_64BIT_NET_STATS64(ctr)			\
	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))

#define GET_64BIT_NET_STATS(ctr)			\
	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +	\
	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)

#define GET_32BIT_NET_STATS(ctr)			\
	(unsigned long) (bp->stats_blk->ctr +		\
			 bp->temp_stats_blk->ctr)

static struct rtnl_link_stats64 *
bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->stats_blk == NULL)
		return net_stats;

	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
  5432. /* All ethtool functions called with rtnl_lock */
  5433. static int
  5434. bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  5435. {
  5436. struct bnx2 *bp = netdev_priv(dev);
  5437. int support_serdes = 0, support_copper = 0;
  5438. cmd->supported = SUPPORTED_Autoneg;
  5439. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
  5440. support_serdes = 1;
  5441. support_copper = 1;
  5442. } else if (bp->phy_port == PORT_FIBRE)
  5443. support_serdes = 1;
  5444. else
  5445. support_copper = 1;
  5446. if (support_serdes) {
  5447. cmd->supported |= SUPPORTED_1000baseT_Full |
  5448. SUPPORTED_FIBRE;
  5449. if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
  5450. cmd->supported |= SUPPORTED_2500baseX_Full;
  5451. }
  5452. if (support_copper) {
  5453. cmd->supported |= SUPPORTED_10baseT_Half |
  5454. SUPPORTED_10baseT_Full |
  5455. SUPPORTED_100baseT_Half |
  5456. SUPPORTED_100baseT_Full |
  5457. SUPPORTED_1000baseT_Full |
  5458. SUPPORTED_TP;
  5459. }
  5460. spin_lock_bh(&bp->phy_lock);
  5461. cmd->port = bp->phy_port;
  5462. cmd->advertising = bp->advertising;
  5463. if (bp->autoneg & AUTONEG_SPEED) {
  5464. cmd->autoneg = AUTONEG_ENABLE;
  5465. } else {
  5466. cmd->autoneg = AUTONEG_DISABLE;
  5467. }
  5468. if (netif_carrier_ok(dev)) {
  5469. ethtool_cmd_speed_set(cmd, bp->line_speed);
  5470. cmd->duplex = bp->duplex;
  5471. }
  5472. else {
  5473. ethtool_cmd_speed_set(cmd, -1);
  5474. cmd->duplex = -1;
  5475. }
  5476. spin_unlock_bh(&bp->phy_lock);
  5477. cmd->transceiver = XCVR_INTERNAL;
  5478. cmd->phy_address = bp->phy_addr;
  5479. return 0;
  5480. }
  5481. static int
  5482. bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  5483. {
  5484. struct bnx2 *bp = netdev_priv(dev);
  5485. u8 autoneg = bp->autoneg;
  5486. u8 req_duplex = bp->req_duplex;
  5487. u16 req_line_speed = bp->req_line_speed;
  5488. u32 advertising = bp->advertising;
  5489. int err = -EINVAL;
  5490. spin_lock_bh(&bp->phy_lock);
  5491. if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
  5492. goto err_out_unlock;
  5493. if (cmd->port != bp->phy_port &&
  5494. !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
  5495. goto err_out_unlock;
  5496. /* If device is down, we can store the settings only if the user
  5497. * is setting the currently active port.
  5498. */
  5499. if (!netif_running(dev) && cmd->port != bp->phy_port)
  5500. goto err_out_unlock;
  5501. if (cmd->autoneg == AUTONEG_ENABLE) {
  5502. autoneg |= AUTONEG_SPEED;
  5503. advertising = cmd->advertising;
  5504. if (cmd->port == PORT_TP) {
  5505. advertising &= ETHTOOL_ALL_COPPER_SPEED;
  5506. if (!advertising)
  5507. advertising = ETHTOOL_ALL_COPPER_SPEED;
  5508. } else {
  5509. advertising &= ETHTOOL_ALL_FIBRE_SPEED;
  5510. if (!advertising)
  5511. advertising = ETHTOOL_ALL_FIBRE_SPEED;
  5512. }
  5513. advertising |= ADVERTISED_Autoneg;
  5514. }
  5515. else {
  5516. u32 speed = ethtool_cmd_speed(cmd);
  5517. if (cmd->port == PORT_FIBRE) {
  5518. if ((speed != SPEED_1000 &&
  5519. speed != SPEED_2500) ||
  5520. (cmd->duplex != DUPLEX_FULL))
  5521. goto err_out_unlock;
  5522. if (speed == SPEED_2500 &&
  5523. !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  5524. goto err_out_unlock;
  5525. } else if (speed == SPEED_1000 || speed == SPEED_2500)
  5526. goto err_out_unlock;
  5527. autoneg &= ~AUTONEG_SPEED;
  5528. req_line_speed = speed;
  5529. req_duplex = cmd->duplex;
  5530. advertising = 0;
  5531. }
  5532. bp->autoneg = autoneg;
  5533. bp->advertising = advertising;
  5534. bp->req_line_speed = req_line_speed;
  5535. bp->req_duplex = req_duplex;
  5536. err = 0;
  5537. /* If device is down, the new settings will be picked up when it is
  5538. * brought up.
  5539. */
  5540. if (netif_running(dev))
  5541. err = bnx2_setup_phy(bp, cmd->port);
  5542. err_out_unlock:
  5543. spin_unlock_bh(&bp->phy_lock);
  5544. return err;
  5545. }
  5546. static void
  5547. bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
  5548. {
  5549. struct bnx2 *bp = netdev_priv(dev);
  5550. strcpy(info->driver, DRV_MODULE_NAME);
  5551. strcpy(info->version, DRV_MODULE_VERSION);
  5552. strcpy(info->bus_info, pci_name(bp->pdev));
  5553. strcpy(info->fw_version, bp->fw_version);
  5554. }
  5555. #define BNX2_REGDUMP_LEN (32 * 1024)
  5556. static int
  5557. bnx2_get_regs_len(struct net_device *dev)
  5558. {
  5559. return BNX2_REGDUMP_LEN;
  5560. }
  5561. static void
  5562. bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
  5563. {
  5564. u32 *p = _p, i, offset;
  5565. u8 *orig_p = _p;
  5566. struct bnx2 *bp = netdev_priv(dev);
  5567. static const u32 reg_boundaries[] = {
  5568. 0x0000, 0x0098, 0x0400, 0x045c,
  5569. 0x0800, 0x0880, 0x0c00, 0x0c10,
  5570. 0x0c30, 0x0d08, 0x1000, 0x101c,
  5571. 0x1040, 0x1048, 0x1080, 0x10a4,
  5572. 0x1400, 0x1490, 0x1498, 0x14f0,
  5573. 0x1500, 0x155c, 0x1580, 0x15dc,
  5574. 0x1600, 0x1658, 0x1680, 0x16d8,
  5575. 0x1800, 0x1820, 0x1840, 0x1854,
  5576. 0x1880, 0x1894, 0x1900, 0x1984,
  5577. 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
  5578. 0x1c80, 0x1c94, 0x1d00, 0x1d84,
  5579. 0x2000, 0x2030, 0x23c0, 0x2400,
  5580. 0x2800, 0x2820, 0x2830, 0x2850,
  5581. 0x2b40, 0x2c10, 0x2fc0, 0x3058,
  5582. 0x3c00, 0x3c94, 0x4000, 0x4010,
  5583. 0x4080, 0x4090, 0x43c0, 0x4458,
  5584. 0x4c00, 0x4c18, 0x4c40, 0x4c54,
  5585. 0x4fc0, 0x5010, 0x53c0, 0x5444,
  5586. 0x5c00, 0x5c18, 0x5c80, 0x5c90,
  5587. 0x5fc0, 0x6000, 0x6400, 0x6428,
  5588. 0x6800, 0x6848, 0x684c, 0x6860,
  5589. 0x6888, 0x6910, 0x8000
  5590. };
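	/* reg_boundaries[] holds [start, end) pairs of implemented
	 * register ranges; offsets between pairs are skipped and left
	 * zeroed by the memset below.
	 */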
	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset / 4;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}

static void
bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flags & BNX2_FLAG_NO_WOL) {
		wol->supported = 0;
		wol->wolopts = 0;
	}
	else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int
bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & BNX2_FLAG_NO_WOL)
			return -EINVAL;

		bp->wol = 1;
	}
	else {
		bp->wol = 0;
	}
	return 0;
}

static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}

static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}

static int
bnx2_get_eeprom_len(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (bp->flash_info == NULL)
		return 0;

	return (int) bp->flash_size;
}

static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;
	return 0;
}

static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);
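	/* Clamp each parameter to the range its host coalescing field
	 * can hold; the masks suggest 10-bit tick values and 8-bit
	 * frame-count trip points.
	 */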
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}

static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}

static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
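	/* Resizing with the interface up is a full reinit: quiesce and
	 * reset the chip, free all rings, resize, then reallocate and
	 * restart the NIC.
	 */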
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		__bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_request_irq(bp);

		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}

static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
	return rc;
}

static void
bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
}

static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
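/* bnx2_stats_offset_arr maps each string above to a 32-bit word offset
 * within struct statistics_block; the *_stats_len_arr tables below give
 * each counter's width (8 = 64-bit hi/lo pair, 4 = 32-bit, 0 = skipped
 * on that chip).
 */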
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
			sizeof(bnx2_stats_str_arr[0]))

#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInFTQDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};

#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};

static int
bnx2_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return BNX2_NUM_TESTS;
	case ETH_SS_STATS:
		return BNX2_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}

static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}

static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}

static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}

static int
bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
	struct bnx2 *bp = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		bnx2_set_power_state(bp, PCI_D0);

		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
		       BNX2_EMAC_LED_1000MB_OVERRIDE |
		       BNX2_EMAC_LED_100MB_OVERRIDE |
		       BNX2_EMAC_LED_10MB_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
		       BNX2_EMAC_LED_TRAFFIC);
		break;

	case ETHTOOL_ID_OFF:
		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		REG_WR(bp, BNX2_EMAC_LED, 0);
		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);

		if (!netif_running(dev))
			bnx2_set_power_state(bp, PCI_D3hot);
		break;
	}

	return 0;
}
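/* If the chip cannot leave the VLAN tag in the rx packet
 * (BNX2_FLAG_CAN_KEEP_VLAN unset), rx VLAN tag stripping must stay
 * enabled and cannot be toggled off.
 */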
static u32
bnx2_fix_features(struct net_device *dev, u32 features)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		features |= NETIF_F_HW_VLAN_RX;

	return features;
}

static int
bnx2_set_features(struct net_device *dev, u32 features)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TSO with VLAN tag won't work with current firmware */
	if (features & NETIF_F_HW_VLAN_TX)
		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
	else
		dev->vlan_features &= ~NETIF_F_ALL_TSO;

	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
	    netif_running(dev)) {
		bnx2_netif_stop(bp, false);
		dev->features = features;
		bnx2_set_rx_mode(dev);
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
		bnx2_netif_start(bp, false);
		return 1;
	}

	return 0;
}

static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.set_phys_id		= bnx2_set_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};

/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}

/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	return 0;
}

/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
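/* Netpoll entry point: service every vector with its interrupt masked
 * so paths like netconsole can make progress with normal interrupt
 * delivery disabled.
 */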
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif

static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}
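	/* The bond ID did not identify the media directly; decode the
	 * PHY strap (the override value if set, otherwise the pin
	 * strap) to tell copper from SERDES for this PCI function.
	 */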
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}

static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}

static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;
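	/* The VPD image was read into the upper half of the buffer
	 * with the bytes of each 32-bit word reversed; swap them back
	 * into the lower half so the pci_vpd_* helpers can parse it.
	 */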
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}

static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;
	int err;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bnx2_set_power_state(bp, PCI_D0);
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
	       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;

		/* AER (Advanced Error Reporting) hooks */
		err = pci_enable_pcie_error_reporting(pdev);
		if (!err)
			bp->flags |= BNX2_FLAG_AER_ENABLED;

	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);
	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
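	/* Append the bootcode revision as "bc x.y.z", converting each
	 * byte of BNX2_DEV_INFO_BC_REV to decimal with leading zeros
	 * suppressed.
	 */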
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}
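	/* The permanent MAC address spans two shared memory words:
	 * the top 2 bytes in MAC_UPPER and the low 4 bytes in
	 * MAC_LOWER, most significant byte first.
	 */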
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

#ifdef BCM_CNIC
	bp->cnic_eth_dev.max_iscsi_conn =
		bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN);
#endif
	pci_save_state(pdev);

	return 0;

err_out_unmap:
	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static char * __devinit
bnx2_bus_string(struct bnx2 *bp, char *str)
{
	char *s = str;

	if (bp->flags & BNX2_FLAG_PCIE) {
		s += sprintf(s, "PCI Express");
	} else {
		s += sprintf(s, "PCI");
		if (bp->flags & BNX2_FLAG_PCIX)
			s += sprintf(s, "-X");
		if (bp->flags & BNX2_FLAG_PCI_32BIT)
			s += sprintf(s, " 32-bit");
		else
			s += sprintf(s, " 64-bit");
		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
	}
	return str;
}

static void
bnx2_del_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++)
		netif_napi_del(&bp->bnx2_napi[i].napi);
}

static void
bnx2_init_napi(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		int (*poll)(struct napi_struct *, int);

		if (i == 0)
			poll = bnx2_poll;
		else
			poll = bnx2_poll_msix;

		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
		bnapi->bp = bp;
	}
}

static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats64	= bnx2_get_stats64,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_fix_features	= bnx2_fix_features,
	.ndo_set_features	= bnx2_set_features,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};

static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
		NETIF_F_TSO | NETIF_F_TSO_ECN |
		NETIF_F_RXHASH | NETIF_F_RXCSUM;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= dev->hw_features;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}

static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	unregister_netdev(dev);

	del_timer_sync(&bp->timer);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
	}

	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	cancel_work_sync(&bp->reset_task);
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}

static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}

/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	pci_ers_result_t result;
	int err;

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		if (netif_running(dev)) {
			bnx2_set_power_state(bp, PCI_D0);
			bnx2_init_nic(bp, 1);
		}
		result = PCI_ERS_RESULT_RECOVERED;
	}
	rtnl_unlock();

	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
		return result;

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return result;
}

/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}

static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};

static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};

static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);