/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/gfp.h>
static struct mb_cmd_name {
	uint16_t cmd;
	const char *str;
} mb_str[] = {
	{MBC_GET_PORT_DATABASE,		"GPDB"},
	{MBC_GET_ID_LIST,		"GIDList"},
	{MBC_GET_LINK_PRIV_STATS,	"Stats"},
};
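
/* Map a mailbox command opcode to a short printable name. */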
static const char *mb_to_str(uint16_t cmd)
{
	int i;
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
		e = mb_str + i;
		if (cmd == e->cmd)
			return e->str;
	}
	return "unknown";
}
static struct rom_cmd {
	uint16_t cmd;
} rom_cmds[] = {
	{ MBC_LOAD_RAM },
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_READ_SFP },
};
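
/*
 * is_rom_cmd() reports whether a mailbox command is in rom_cmds[], i.e. one
 * that may still be issued while an ISP abort is pending (see the abort
 * check in qla2x00_mailbox_command()).
 */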
static int is_rom_cmd(uint16_t cmd)
{
	int i;
	struct rom_cmd *wc;

	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
		wc = rom_cmds + i;
		if (wc->cmd == cmd)
			return 1;
	}

	return 0;
}
/*
 * qla2x00_mailbox_command
 *	Issues a mailbox command and waits for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS = cmd performed successfully
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
	int rval, i;
	unsigned long flags = 0;
	device_reg_t *reg;
	uint8_t abort_active;
	uint8_t io_lock_on;
	uint16_t command = 0;
	uint16_t *iptr;
	uint16_t __iomem *optr;
	uint32_t cnt;
	uint32_t mboxes;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "error_state is greater than pci_channel_io_frozen, "
		    "exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	reg = ha->iobase;
	io_lock_on = base_vha->flags.init_done;

	rval = QLA_SUCCESS;
	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;
	}

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;
	}

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		    mcp->mb[0]);
		return QLA_FUNCTION_TIMEOUT;
	}

	ha->flags.mbox_busy = 1;
	/* Save mailbox command for debug */
	ha->mcp = mcp;

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
		optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
	else
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);

	iptr = mcp->mb;
	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			optr =
			    (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);
		}

		mboxes >>= 1;
		optr++;
		iptr++;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
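	/*
	 * Two wait strategies: once initialization is done and no ISP abort
	 * is active, arm MBX_INTR_WAIT and sleep on the interrupt completion;
	 * otherwise poll the response queue in the else branch below.
	 */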
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				ha->flags.mbox_busy = 0;
				ql_dbg(ql_dbg_mbx, vha, 0x1010,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies;
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
		}
		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				ha->flags.mbox_busy = 0;
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
			WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
		else
			WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (time_after(jiffies, wait_time))
				break;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    !(IS_QLA2200(ha) &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
				msleep(10);
		} /* while */
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    "Waited %d sec.\n",
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	}
	/* Check whether we timed out */
	if (ha->flags.mbox_int) {
		uint16_t *iptr2;

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			ha->flags.mbox_busy = 0;
			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
			goto premature_exit;
		}

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr2 = mcp->mb;
		iptr = (uint16_t *)&ha->mailbox_out[0];
		mboxes = mcp->in_mb;

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				*iptr2 = *iptr;
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
			}

			mboxes >>= 1;
			iptr2++;
			iptr++;
		}
	} else {
		uint16_t mb[8];
		uint32_t ictrl, host_status, hccr;
		uint16_t w;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
			mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
			mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
			mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
			mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
			ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
			host_status = RD_REG_DWORD(&reg->isp24.host_status);
			hccr = RD_REG_DWORD(&reg->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);
		} else {
			mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
			ictrl = RD_REG_WORD(&reg->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		}
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff) {
				/* Special case: if the driver is being
				 * unloaded and the PCI device has gone into a
				 * bad state due to a PCI error, only the PCI
				 * ERR flag will be set, so exit prematurely.
				 */
				ha->flags.mbox_busy = 0;
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. We do not
			 * need to do this if we are intentionally generating
			 * a dump.
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);

			rval = QLA_FUNCTION_TIMEOUT;
		}
	}
	ha->flags.mbox_busy = 0;

	/* Clean up */
	ha->mcp = NULL;

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);
	}
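	/*
	 * A timed-out command (other than an intentionally generated system
	 * error) leaves the firmware in an unknown state: either schedule an
	 * ISP abort for the DPC thread or, when already running in DPC
	 * context, perform the abort directly below.
	 */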
	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				    ha->flags.eeh_busy);
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
			}
		} else if (!abort_active) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    "0 & 1.\n");
					qla82xx_wr_32(ha,
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				}
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
					    &vha->dpc_flags);
				}
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");
				goto mbx_done;
			}
		}
	}
premature_exit:
	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

mbx_done:
	if (rval) {
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			    vha->host_no);
			mboxes = mcp->in_mb;
			cnt = 4;
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
					cnt--;
				}
			pr_warn(" cmd=%x ****\n", command);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1198,
		    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
		    RD_REG_DWORD(&reg->isp24.host_status),
		    RD_REG_DWORD(&reg->isp24.ictrl),
		    RD_REG_DWORD(&reg->isp24.istatus));
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}
int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);
	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
	} else {
		mcp->mb[0] = MBC_LOAD_RISC_RAM;
		mcp->out_mb = MBX_0;
	}
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
	} else {
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;
	}

	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
	}

	return rval;
}
#define	EXTENDED_BB_CREDITS	BIT_0
#define	NVME_ENABLE_FLAG	BIT_3
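
/*
 * Helpers that build the long-range (LR) distance bits for the
 * execute-firmware mailbox 4 value, either from a detected LR SFP or from
 * the NVRAM enhanced_features field.
 */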
static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;

	return mb4;
}

static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
{
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		struct nvram_81xx *nv = ha->nvram;

		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
	}

	return mb4;
}
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0;
	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);
		mcp->mb[3] = 0;
		mcp->mb[4] = 0;
		mcp->mb[11] = 0;
		ha->flags.using_lr_setting = 0;
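		/*
		 * Long-range (LR) link settings: taken from the detected SFP
		 * when SFP auto-detection is enabled, otherwise from NVRAM if
		 * the LR-distance feature bit is set there.
		 */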
		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha)) {
			if (ql2xautodetectsfp) {
				if (ha->flags.detected_lr_sfp) {
					mcp->mb[4] |=
					    qla25xx_set_sfp_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			} else {
				struct nvram_81xx *nv = ha->nvram;
				/* set LR distance if specified in nvram */
				if (nv->enhanced_features &
				    NEF_LR_DIST_ENABLE) {
					mcp->mb[4] |=
					    qla25xx_set_nvr_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				}
			}
		}

		if (ql2xnvmeenable && IS_QLA27XX(ha))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_link_speed >= 2 &&
			    nv->min_link_speed <= 5) {
				mcp->mb[4] |= BIT_4;
				mcp->mb[11] = nv->min_link_speed;
				mcp->out_mb |= MBX_11;
				mcp->in_mb |= BIT_5;
				vha->min_link_speed_feat = nv->min_link_speed;
			}
		}

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
	} else {
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->mb[2] = 0;
			mcp->out_mb |= MBX_2;
		}
	}
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
			ql_dbg(ql_dbg_mbx, vha, 0x119a,
			    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
			ql_dbg(ql_dbg_mbx, vha, 0x1027,
			    "exchanges=%x.\n", mcp->mb[1]);
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				ha->max_speed_sup = mcp->mb[2] & BIT_0;
				ql_dbg(ql_dbg_mbx, vha, 0x119b,
				    "Maximum speed supported=%s.\n",
				    ha->max_speed_sup ? "32Gps" : "16Gps");
				if (vha->min_link_speed_feat) {
					ha->min_link_speed = mcp->mb[5];
					ql_dbg(ql_dbg_mbx, vha, 0x119c,
					    "Minimum speed set=%s.\n",
					    mcp->mb[5] == 5 ? "32Gps" :
					    mcp->mb[5] == 4 ? "16Gps" :
					    mcp->mb[5] == 3 ? "8Gps" :
					    mcp->mb[5] == 2 ? "4Gps" :
					    "unknown");
				}
			}
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
		    "Done.\n");
	}

	return rval;
}
/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buf_sz:		returns the buffer size reported by firmware.
 *	ex_logins_cnt:	returns the extended login count reported by firmware.
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define	FETCH_XLOGINS_STAT	0x8
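/* Sub-operation code passed in mailbox 1 of MBC_GET_MEM_OFFLOAD_CNTRL_STAT. */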
int
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qla_set_exlogin_mem_cfg
 *	Set extended login memory configuration.
 *	Mbx needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	phys_addr:	physical address of the extended login buffer.
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XLOGINS_MEM	0x3
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	ha:		adapter state pointer.
 *	buf_sz:		returns the buffer size reported by firmware.
 *	ex_logins_cnt:	returns the exchange offload count reported by firmware.
 *
 * Returns:
 *	qla2x00 local function status
 *
 * Context:
 *	Kernel context.
 */
#define FETCH_XCHOFFLD_STAT	0x2
int
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
	} else {
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration.
 *	Mbx needs to be issued before init_cb is set.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * Context:
 *	Kernel context.
 */
#define CONFIG_XCHOFFLD_MEM	0x3
int
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
	}

	return rval;
}
/*
 * qla2x00_get_fw_version
 *	Get firmware version; results are stored in the qla_hw_data
 *	fw_*_version and related fields.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
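	/*
	 * Newer ISPs return extra version data (MPI/PHY versions, extended
	 * attributes, PEP version and firmware memory ranges), so widen the
	 * set of mailbox registers read back accordingly.
	 */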
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha))
		mcp->in_mb |=
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;

	mcp->flags = 0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		goto failed;

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;	/* Defaults to 128KB. */
	else
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
			vha->flags.nvme_enabled = 1;
	}

	if (IS_QLA27XX(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
	}

failed:
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
	}
	return rval;
}
/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
	} else {
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
	}

	return rval;
}
  976. /*
  977. * qla2x00_set_fw_options
  978. * Set firmware options.
  979. *
  980. * Input:
  981. * ha = adapter block pointer.
 * fwopts = pointer for firmware options.
  983. *
  984. * Returns:
  985. * qla2x00 local function return status code.
  986. *
  987. * Context:
  988. * Kernel context.
  989. */
  990. int
  991. qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
  992. {
  993. int rval;
  994. mbx_cmd_t mc;
  995. mbx_cmd_t *mcp = &mc;
  996. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
  997. "Entered %s.\n", __func__);
  998. mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
  999. mcp->mb[1] = fwopts[1];
  1000. mcp->mb[2] = fwopts[2];
  1001. mcp->mb[3] = fwopts[3];
  1002. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1003. mcp->in_mb = MBX_0;
  1004. if (IS_FWI2_CAPABLE(vha->hw)) {
  1005. mcp->in_mb |= MBX_1;
  1006. mcp->mb[10] = fwopts[10];
  1007. mcp->out_mb |= MBX_10;
  1008. } else {
  1009. mcp->mb[10] = fwopts[10];
  1010. mcp->mb[11] = fwopts[11];
  1011. mcp->mb[12] = 0; /* Undocumented, but used */
  1012. mcp->out_mb |= MBX_12|MBX_11|MBX_10;
  1013. }
  1014. mcp->tov = MBX_TOV_SECONDS;
  1015. mcp->flags = 0;
  1016. rval = qla2x00_mailbox_command(vha, mcp);
  1017. fwopts[0] = mcp->mb[0];
  1018. if (rval != QLA_SUCCESS) {
  1019. /*EMPTY*/
  1020. ql_dbg(ql_dbg_mbx, vha, 0x1030,
  1021. "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
  1022. } else {
  1023. /*EMPTY*/
  1024. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
  1025. "Done %s.\n", __func__);
  1026. }
  1027. return rval;
  1028. }
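/*
 * Minimal usage sketch for the get/set pair above: callers typically
 * read the current firmware option words, modify the desired bits and
 * write them back (the bit shown is illustrative only):
 *
 *	uint16_t opts[16] = { 0 };
 *
 *	if (qla2x00_get_fw_options(vha, opts) == QLA_SUCCESS) {
 *		opts[1] |= BIT_1;
 *		qla2x00_set_fw_options(vha, opts);
 *	}
 */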
  1029. /*
  1030. * qla2x00_mbx_reg_test
  1031. * Mailbox register wrap test.
  1032. *
  1033. * Input:
  1034. * ha = adapter block pointer.
  1035. * TARGET_QUEUE_LOCK must be released.
  1036. * ADAPTER_STATE_LOCK must be released.
  1037. *
  1038. * Returns:
  1039. * qla2x00 local function return status code.
  1040. *
  1041. * Context:
  1042. * Kernel context.
  1043. */
  1044. int
  1045. qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
  1046. {
  1047. int rval;
  1048. mbx_cmd_t mc;
  1049. mbx_cmd_t *mcp = &mc;
  1050. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
  1051. "Entered %s.\n", __func__);
  1052. mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
  1053. mcp->mb[1] = 0xAAAA;
  1054. mcp->mb[2] = 0x5555;
  1055. mcp->mb[3] = 0xAA55;
  1056. mcp->mb[4] = 0x55AA;
  1057. mcp->mb[5] = 0xA5A5;
  1058. mcp->mb[6] = 0x5A5A;
  1059. mcp->mb[7] = 0x2525;
  1060. mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  1061. mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  1062. mcp->tov = MBX_TOV_SECONDS;
  1063. mcp->flags = 0;
  1064. rval = qla2x00_mailbox_command(vha, mcp);
  1065. if (rval == QLA_SUCCESS) {
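/* Verify the firmware echoed the test patterns back unchanged. */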
  1066. if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
  1067. mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
  1068. rval = QLA_FUNCTION_FAILED;
  1069. if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
  1070. mcp->mb[7] != 0x2525)
  1071. rval = QLA_FUNCTION_FAILED;
  1072. }
  1073. if (rval != QLA_SUCCESS) {
  1074. /*EMPTY*/
  1075. ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
  1076. } else {
  1077. /*EMPTY*/
  1078. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
  1079. "Done %s.\n", __func__);
  1080. }
  1081. return rval;
  1082. }
  1083. /*
  1084. * qla2x00_verify_checksum
  1085. * Verify firmware checksum.
  1086. *
  1087. * Input:
  1088. * ha = adapter block pointer.
  1089. * TARGET_QUEUE_LOCK must be released.
  1090. * ADAPTER_STATE_LOCK must be released.
  1091. *
  1092. * Returns:
  1093. * qla2x00 local function return status code.
  1094. *
  1095. * Context:
  1096. * Kernel context.
  1097. */
  1098. int
  1099. qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
  1100. {
  1101. int rval;
  1102. mbx_cmd_t mc;
  1103. mbx_cmd_t *mcp = &mc;
  1104. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
  1105. "Entered %s.\n", __func__);
  1106. mcp->mb[0] = MBC_VERIFY_CHECKSUM;
  1107. mcp->out_mb = MBX_0;
  1108. mcp->in_mb = MBX_0;
  1109. if (IS_FWI2_CAPABLE(vha->hw)) {
  1110. mcp->mb[1] = MSW(risc_addr);
  1111. mcp->mb[2] = LSW(risc_addr);
  1112. mcp->out_mb |= MBX_2|MBX_1;
  1113. mcp->in_mb |= MBX_2|MBX_1;
  1114. } else {
  1115. mcp->mb[1] = LSW(risc_addr);
  1116. mcp->out_mb |= MBX_1;
  1117. mcp->in_mb |= MBX_1;
  1118. }
  1119. mcp->tov = MBX_TOV_SECONDS;
  1120. mcp->flags = 0;
  1121. rval = qla2x00_mailbox_command(vha, mcp);
  1122. if (rval != QLA_SUCCESS) {
  1123. ql_dbg(ql_dbg_mbx, vha, 0x1036,
  1124. "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
  1125. (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
  1126. } else {
  1127. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
  1128. "Done %s.\n", __func__);
  1129. }
  1130. return rval;
  1131. }
  1132. /*
  1133. * qla2x00_issue_iocb
  1134. * Issue IOCB using mailbox command
  1135. *
  1136. * Input:
  1137. * ha = adapter state pointer.
  1138. * buffer = buffer pointer.
  1139. * phys_addr = physical address of buffer.
  1140. * size = size of buffer.
  1141. * TARGET_QUEUE_LOCK must be released.
  1142. * ADAPTER_STATE_LOCK must be released.
  1143. *
  1144. * Returns:
  1145. * qla2x00 local function return status code.
  1146. *
  1147. * Context:
  1148. * Kernel context.
  1149. */
  1150. int
  1151. qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
  1152. dma_addr_t phys_addr, size_t size, uint32_t tov)
  1153. {
  1154. int rval;
  1155. mbx_cmd_t mc;
  1156. mbx_cmd_t *mcp = &mc;
  1157. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
  1158. "Entered %s.\n", __func__);
  1159. mcp->mb[0] = MBC_IOCB_COMMAND_A64;
  1160. mcp->mb[1] = 0;
  1161. mcp->mb[2] = MSW(phys_addr);
  1162. mcp->mb[3] = LSW(phys_addr);
  1163. mcp->mb[6] = MSW(MSD(phys_addr));
  1164. mcp->mb[7] = LSW(MSD(phys_addr));
  1165. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1166. mcp->in_mb = MBX_2|MBX_0;
  1167. mcp->tov = tov;
  1168. mcp->flags = 0;
  1169. rval = qla2x00_mailbox_command(vha, mcp);
  1170. if (rval != QLA_SUCCESS) {
  1171. /*EMPTY*/
  1172. ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
  1173. } else {
  1174. sts_entry_t *sts_entry = (sts_entry_t *) buffer;
  1175. /* Mask reserved bits. */
  1176. sts_entry->entry_status &=
  1177. IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
  1178. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
  1179. "Done %s.\n", __func__);
  1180. }
  1181. return rval;
  1182. }
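/* Issue an IOCB using the default mailbox command timeout. */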
  1183. int
  1184. qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
  1185. size_t size)
  1186. {
  1187. return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
  1188. MBX_TOV_SECONDS);
  1189. }
  1190. /*
  1191. * qla2x00_abort_command
  1192. * Abort command aborts a specified IOCB.
  1193. *
  1194. * Input:
  1195. * ha = adapter block pointer.
 * sp = SRB structure pointer.
  1197. *
  1198. * Returns:
  1199. * qla2x00 local function return status code.
  1200. *
  1201. * Context:
  1202. * Kernel context.
  1203. */
  1204. int
  1205. qla2x00_abort_command(srb_t *sp)
  1206. {
  1207. unsigned long flags = 0;
  1208. int rval;
  1209. uint32_t handle = 0;
  1210. mbx_cmd_t mc;
  1211. mbx_cmd_t *mcp = &mc;
  1212. fc_port_t *fcport = sp->fcport;
  1213. scsi_qla_host_t *vha = fcport->vha;
  1214. struct qla_hw_data *ha = vha->hw;
  1215. struct req_que *req;
  1216. struct scsi_cmnd *cmd = GET_CMD_SP(sp);
  1217. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
  1218. "Entered %s.\n", __func__);
  1219. if (vha->flags.qpairs_available && sp->qpair)
  1220. req = sp->qpair->req;
  1221. else
  1222. req = vha->req;
  1223. spin_lock_irqsave(&ha->hardware_lock, flags);
  1224. for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
  1225. if (req->outstanding_cmds[handle] == sp)
  1226. break;
  1227. }
  1228. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  1229. if (handle == req->num_outstanding_cmds) {
  1230. /* command not found */
  1231. return QLA_FUNCTION_FAILED;
  1232. }
  1233. mcp->mb[0] = MBC_ABORT_COMMAND;
  1234. if (HAS_EXTENDED_IDS(ha))
  1235. mcp->mb[1] = fcport->loop_id;
  1236. else
  1237. mcp->mb[1] = fcport->loop_id << 8;
  1238. mcp->mb[2] = (uint16_t)handle;
  1239. mcp->mb[3] = (uint16_t)(handle >> 16);
  1240. mcp->mb[6] = (uint16_t)cmd->device->lun;
  1241. mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1242. mcp->in_mb = MBX_0;
  1243. mcp->tov = MBX_TOV_SECONDS;
  1244. mcp->flags = 0;
  1245. rval = qla2x00_mailbox_command(vha, mcp);
  1246. if (rval != QLA_SUCCESS) {
  1247. ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
  1248. } else {
  1249. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
  1250. "Done %s.\n", __func__);
  1251. }
  1252. return rval;
  1253. }
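/*
 * qla2x00_abort_target
 * Issue abort target mailbox command, then post a marker IOCB.
 *
 * Input:
 * fcport = FC port structure pointer.
 * l = LUN (unused for a target-level abort).
 * tag = task tag (unused).
 *
 * Returns:
 * qla2x00 local function return status code.
 *
 * Context:
 * Kernel context.
 */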
  1254. int
  1255. qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
  1256. {
  1257. int rval, rval2;
  1258. mbx_cmd_t mc;
  1259. mbx_cmd_t *mcp = &mc;
  1260. scsi_qla_host_t *vha;
  1261. struct req_que *req;
  1262. struct rsp_que *rsp;
/* The LUN argument is not used for a target-level abort. */
  1264. vha = fcport->vha;
  1265. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
  1266. "Entered %s.\n", __func__);
  1267. req = vha->hw->req_q_map[0];
  1268. rsp = req->rsp;
  1269. mcp->mb[0] = MBC_ABORT_TARGET;
  1270. mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
  1271. if (HAS_EXTENDED_IDS(vha->hw)) {
  1272. mcp->mb[1] = fcport->loop_id;
  1273. mcp->mb[10] = 0;
  1274. mcp->out_mb |= MBX_10;
  1275. } else {
  1276. mcp->mb[1] = fcport->loop_id << 8;
  1277. }
  1278. mcp->mb[2] = vha->hw->loop_reset_delay;
  1279. mcp->mb[9] = vha->vp_idx;
  1280. mcp->in_mb = MBX_0;
  1281. mcp->tov = MBX_TOV_SECONDS;
  1282. mcp->flags = 0;
  1283. rval = qla2x00_mailbox_command(vha, mcp);
  1284. if (rval != QLA_SUCCESS) {
  1285. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
  1286. "Failed=%x.\n", rval);
  1287. }
  1288. /* Issue marker IOCB. */
  1289. rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
  1290. MK_SYNC_ID);
  1291. if (rval2 != QLA_SUCCESS) {
  1292. ql_dbg(ql_dbg_mbx, vha, 0x1040,
  1293. "Failed to issue marker IOCB (%x).\n", rval2);
  1294. } else {
  1295. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
  1296. "Done %s.\n", __func__);
  1297. }
  1298. return rval;
  1299. }
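/*
 * qla2x00_lun_reset
 * Issue LUN reset mailbox command, then post a marker IOCB.
 *
 * Input:
 * fcport = FC port structure pointer.
 * l = LUN to reset.
 * tag = task tag (unused).
 *
 * Returns:
 * qla2x00 local function return status code.
 *
 * Context:
 * Kernel context.
 */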
  1300. int
  1301. qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
  1302. {
  1303. int rval, rval2;
  1304. mbx_cmd_t mc;
  1305. mbx_cmd_t *mcp = &mc;
  1306. scsi_qla_host_t *vha;
  1307. struct req_que *req;
  1308. struct rsp_que *rsp;
  1309. vha = fcport->vha;
  1310. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
  1311. "Entered %s.\n", __func__);
  1312. req = vha->hw->req_q_map[0];
  1313. rsp = req->rsp;
  1314. mcp->mb[0] = MBC_LUN_RESET;
  1315. mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
  1316. if (HAS_EXTENDED_IDS(vha->hw))
  1317. mcp->mb[1] = fcport->loop_id;
  1318. else
  1319. mcp->mb[1] = fcport->loop_id << 8;
  1320. mcp->mb[2] = (u32)l;
  1321. mcp->mb[3] = 0;
  1322. mcp->mb[9] = vha->vp_idx;
  1323. mcp->in_mb = MBX_0;
  1324. mcp->tov = MBX_TOV_SECONDS;
  1325. mcp->flags = 0;
  1326. rval = qla2x00_mailbox_command(vha, mcp);
  1327. if (rval != QLA_SUCCESS) {
  1328. ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
  1329. }
  1330. /* Issue marker IOCB. */
  1331. rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
  1332. MK_SYNC_ID_LUN);
  1333. if (rval2 != QLA_SUCCESS) {
  1334. ql_dbg(ql_dbg_mbx, vha, 0x1044,
  1335. "Failed to issue marker IOCB (%x).\n", rval2);
  1336. } else {
  1337. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
  1338. "Done %s.\n", __func__);
  1339. }
  1340. return rval;
  1341. }
  1342. /*
  1343. * qla2x00_get_adapter_id
  1344. * Get adapter ID and topology.
  1345. *
  1346. * Input:
  1347. * ha = adapter block pointer.
  1348. * id = pointer for loop ID.
  1349. * al_pa = pointer for AL_PA.
  1350. * area = pointer for area.
  1351. * domain = pointer for domain.
  1352. * top = pointer for topology.
  1353. * TARGET_QUEUE_LOCK must be released.
  1354. * ADAPTER_STATE_LOCK must be released.
  1355. *
  1356. * Returns:
  1357. * qla2x00 local function return status code.
  1358. *
  1359. * Context:
  1360. * Kernel context.
  1361. */
  1362. int
  1363. qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
  1364. uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
  1365. {
  1366. int rval;
  1367. mbx_cmd_t mc;
  1368. mbx_cmd_t *mcp = &mc;
  1369. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
  1370. "Entered %s.\n", __func__);
  1371. mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
  1372. mcp->mb[9] = vha->vp_idx;
  1373. mcp->out_mb = MBX_9|MBX_0;
  1374. mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1375. if (IS_CNA_CAPABLE(vha->hw))
  1376. mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
  1377. if (IS_FWI2_CAPABLE(vha->hw))
  1378. mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
  1379. if (IS_QLA27XX(vha->hw))
  1380. mcp->in_mb |= MBX_15;
  1381. mcp->tov = MBX_TOV_SECONDS;
  1382. mcp->flags = 0;
  1383. rval = qla2x00_mailbox_command(vha, mcp);
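/* Map the mailbox completion status to a specific driver error code. */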
  1384. if (mcp->mb[0] == MBS_COMMAND_ERROR)
  1385. rval = QLA_COMMAND_ERROR;
  1386. else if (mcp->mb[0] == MBS_INVALID_COMMAND)
  1387. rval = QLA_INVALID_COMMAND;
  1388. /* Return data. */
  1389. *id = mcp->mb[1];
  1390. *al_pa = LSB(mcp->mb[2]);
  1391. *area = MSB(mcp->mb[2]);
  1392. *domain = LSB(mcp->mb[3]);
  1393. *top = mcp->mb[6];
  1394. *sw_cap = mcp->mb[7];
  1395. if (rval != QLA_SUCCESS) {
  1396. /*EMPTY*/
  1397. ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
  1398. } else {
  1399. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
  1400. "Done %s.\n", __func__);
  1401. if (IS_CNA_CAPABLE(vha->hw)) {
  1402. vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
  1403. vha->fcoe_fcf_idx = mcp->mb[10];
  1404. vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
  1405. vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
  1406. vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
  1407. vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
  1408. vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
  1409. vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
  1410. }
  1411. /* If FA-WWN supported */
  1412. if (IS_FAWWN_CAPABLE(vha->hw)) {
  1413. if (mcp->mb[7] & BIT_14) {
  1414. vha->port_name[0] = MSB(mcp->mb[16]);
  1415. vha->port_name[1] = LSB(mcp->mb[16]);
  1416. vha->port_name[2] = MSB(mcp->mb[17]);
  1417. vha->port_name[3] = LSB(mcp->mb[17]);
  1418. vha->port_name[4] = MSB(mcp->mb[18]);
  1419. vha->port_name[5] = LSB(mcp->mb[18]);
  1420. vha->port_name[6] = MSB(mcp->mb[19]);
  1421. vha->port_name[7] = LSB(mcp->mb[19]);
  1422. fc_host_port_name(vha->host) =
  1423. wwn_to_u64(vha->port_name);
  1424. ql_dbg(ql_dbg_mbx, vha, 0x10ca,
  1425. "FA-WWN acquired %016llx\n",
  1426. wwn_to_u64(vha->port_name));
  1427. }
  1428. }
  1429. if (IS_QLA27XX(vha->hw))
  1430. vha->bbcr = mcp->mb[15];
  1431. }
  1432. return rval;
  1433. }
  1434. /*
  1435. * qla2x00_get_retry_cnt
  1436. * Get current firmware login retry count and delay.
  1437. *
  1438. * Input:
  1439. * ha = adapter block pointer.
  1440. * retry_cnt = pointer to login retry count.
  1441. * tov = pointer to login timeout value.
  1442. *
  1443. * Returns:
  1444. * qla2x00 local function return status code.
  1445. *
  1446. * Context:
  1447. * Kernel context.
  1448. */
  1449. int
  1450. qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
  1451. uint16_t *r_a_tov)
  1452. {
  1453. int rval;
  1454. uint16_t ratov;
  1455. mbx_cmd_t mc;
  1456. mbx_cmd_t *mcp = &mc;
  1457. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
  1458. "Entered %s.\n", __func__);
  1459. mcp->mb[0] = MBC_GET_RETRY_COUNT;
  1460. mcp->out_mb = MBX_0;
  1461. mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1462. mcp->tov = MBX_TOV_SECONDS;
  1463. mcp->flags = 0;
  1464. rval = qla2x00_mailbox_command(vha, mcp);
  1465. if (rval != QLA_SUCCESS) {
  1466. /*EMPTY*/
  1467. ql_dbg(ql_dbg_mbx, vha, 0x104a,
  1468. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  1469. } else {
  1470. /* Convert returned data and check our values. */
  1471. *r_a_tov = mcp->mb[3] / 2;
  1472. ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
  1473. if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
  1474. /* Update to the larger values */
  1475. *retry_cnt = (uint8_t)mcp->mb[1];
  1476. *tov = ratov;
  1477. }
  1478. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
  1479. "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
  1480. }
  1481. return rval;
  1482. }
  1483. /*
  1484. * qla2x00_init_firmware
  1485. * Initialize adapter firmware.
  1486. *
  1487. * Input:
  1488. * ha = adapter block pointer.
  1489. * dptr = Initialization control block pointer.
  1490. * size = size of initialization control block.
  1491. * TARGET_QUEUE_LOCK must be released.
  1492. * ADAPTER_STATE_LOCK must be released.
  1493. *
  1494. * Returns:
  1495. * qla2x00 local function return status code.
  1496. *
  1497. * Context:
  1498. * Kernel context.
  1499. */
  1500. int
  1501. qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
  1502. {
  1503. int rval;
  1504. mbx_cmd_t mc;
  1505. mbx_cmd_t *mcp = &mc;
  1506. struct qla_hw_data *ha = vha->hw;
  1507. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
  1508. "Entered %s.\n", __func__);
  1509. if (IS_P3P_TYPE(ha) && ql2xdbwr)
  1510. qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
  1511. (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
  1512. if (ha->flags.npiv_supported)
  1513. mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
  1514. else
  1515. mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
  1516. mcp->mb[1] = 0;
  1517. mcp->mb[2] = MSW(ha->init_cb_dma);
  1518. mcp->mb[3] = LSW(ha->init_cb_dma);
  1519. mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
  1520. mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
  1521. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1522. if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
  1523. mcp->mb[1] = BIT_0;
  1524. mcp->mb[10] = MSW(ha->ex_init_cb_dma);
  1525. mcp->mb[11] = LSW(ha->ex_init_cb_dma);
  1526. mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
  1527. mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
  1528. mcp->mb[14] = sizeof(*ha->ex_init_cb);
  1529. mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
  1530. }
  1531. /* 1 and 2 should normally be captured. */
  1532. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  1533. if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
  1534. /* mb3 is additional info about the installed SFP. */
  1535. mcp->in_mb |= MBX_3;
  1536. mcp->buf_size = size;
  1537. mcp->flags = MBX_DMA_OUT;
  1538. mcp->tov = MBX_TOV_SECONDS;
  1539. rval = qla2x00_mailbox_command(vha, mcp);
  1540. if (rval != QLA_SUCCESS) {
  1541. /*EMPTY*/
  1542. ql_dbg(ql_dbg_mbx, vha, 0x104d,
  1543. "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
  1544. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
  1545. } else {
  1546. if (IS_QLA27XX(ha)) {
  1547. if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
  1548. ql_dbg(ql_dbg_mbx, vha, 0x119d,
  1549. "Invalid SFP/Validation Failed\n");
  1550. }
  1551. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
  1552. "Done %s.\n", __func__);
  1553. }
  1554. return rval;
  1555. }
  1556. /*
  1557. * qla2x00_get_port_database
  1558. * Issue normal/enhanced get port database mailbox command
  1559. * and copy device name as necessary.
  1560. *
  1561. * Input:
  1562. * ha = adapter state pointer.
  1563. * dev = structure pointer.
  1564. * opt = enhanced cmd option byte.
  1565. *
  1566. * Returns:
  1567. * qla2x00 local function return status code.
  1568. *
  1569. * Context:
  1570. * Kernel context.
  1571. */
  1572. int
  1573. qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
  1574. {
  1575. int rval;
  1576. mbx_cmd_t mc;
  1577. mbx_cmd_t *mcp = &mc;
  1578. port_database_t *pd;
  1579. struct port_database_24xx *pd24;
  1580. dma_addr_t pd_dma;
  1581. struct qla_hw_data *ha = vha->hw;
  1582. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
  1583. "Entered %s.\n", __func__);
  1584. pd24 = NULL;
  1585. pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
  1586. if (pd == NULL) {
  1587. ql_log(ql_log_warn, vha, 0x1050,
  1588. "Failed to allocate port database structure.\n");
  1589. return QLA_MEMORY_ALLOC_FAILED;
  1590. }
  1591. memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
  1592. mcp->mb[0] = MBC_GET_PORT_DATABASE;
  1593. if (opt != 0 && !IS_FWI2_CAPABLE(ha))
  1594. mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
  1595. mcp->mb[2] = MSW(pd_dma);
  1596. mcp->mb[3] = LSW(pd_dma);
  1597. mcp->mb[6] = MSW(MSD(pd_dma));
  1598. mcp->mb[7] = LSW(MSD(pd_dma));
  1599. mcp->mb[9] = vha->vp_idx;
  1600. mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
  1601. mcp->in_mb = MBX_0;
  1602. if (IS_FWI2_CAPABLE(ha)) {
  1603. mcp->mb[1] = fcport->loop_id;
  1604. mcp->mb[10] = opt;
  1605. mcp->out_mb |= MBX_10|MBX_1;
  1606. mcp->in_mb |= MBX_1;
  1607. } else if (HAS_EXTENDED_IDS(ha)) {
  1608. mcp->mb[1] = fcport->loop_id;
  1609. mcp->mb[10] = opt;
  1610. mcp->out_mb |= MBX_10|MBX_1;
  1611. } else {
  1612. mcp->mb[1] = fcport->loop_id << 8 | opt;
  1613. mcp->out_mb |= MBX_1;
  1614. }
  1615. mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
  1616. PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
  1617. mcp->flags = MBX_DMA_IN;
  1618. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  1619. rval = qla2x00_mailbox_command(vha, mcp);
  1620. if (rval != QLA_SUCCESS)
  1621. goto gpd_error_out;
  1622. if (IS_FWI2_CAPABLE(ha)) {
  1623. uint64_t zero = 0;
  1624. pd24 = (struct port_database_24xx *) pd;
  1625. /* Check for logged in state. */
  1626. if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
  1627. pd24->last_login_state != PDS_PRLI_COMPLETE) {
  1628. ql_dbg(ql_dbg_mbx, vha, 0x1051,
  1629. "Unable to verify login-state (%x/%x) for "
  1630. "loop_id %x.\n", pd24->current_login_state,
  1631. pd24->last_login_state, fcport->loop_id);
  1632. rval = QLA_FUNCTION_FAILED;
  1633. goto gpd_error_out;
  1634. }
  1635. if (fcport->loop_id == FC_NO_LOOP_ID ||
  1636. (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
  1637. memcmp(fcport->port_name, pd24->port_name, 8))) {
  1638. /* We lost the device mid way. */
  1639. rval = QLA_NOT_LOGGED_IN;
  1640. goto gpd_error_out;
  1641. }
  1642. /* Names are little-endian. */
  1643. memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
  1644. memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
  1645. /* Get port_id of device. */
  1646. fcport->d_id.b.domain = pd24->port_id[0];
  1647. fcport->d_id.b.area = pd24->port_id[1];
  1648. fcport->d_id.b.al_pa = pd24->port_id[2];
  1649. fcport->d_id.b.rsvd_1 = 0;
  1650. /* If not target must be initiator or unknown type. */
  1651. if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
  1652. fcport->port_type = FCT_INITIATOR;
  1653. else
  1654. fcport->port_type = FCT_TARGET;
  1655. /* Passback COS information. */
  1656. fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
  1657. FC_COS_CLASS2 : FC_COS_CLASS3;
  1658. if (pd24->prli_svc_param_word_3[0] & BIT_7)
  1659. fcport->flags |= FCF_CONF_COMP_SUPPORTED;
  1660. } else {
  1661. uint64_t zero = 0;
  1662. /* Check for logged in state. */
  1663. if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
  1664. pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
  1665. ql_dbg(ql_dbg_mbx, vha, 0x100a,
  1666. "Unable to verify login-state (%x/%x) - "
  1667. "portid=%02x%02x%02x.\n", pd->master_state,
  1668. pd->slave_state, fcport->d_id.b.domain,
  1669. fcport->d_id.b.area, fcport->d_id.b.al_pa);
  1670. rval = QLA_FUNCTION_FAILED;
  1671. goto gpd_error_out;
  1672. }
  1673. if (fcport->loop_id == FC_NO_LOOP_ID ||
  1674. (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
  1675. memcmp(fcport->port_name, pd->port_name, 8))) {
  1676. /* We lost the device mid way. */
  1677. rval = QLA_NOT_LOGGED_IN;
  1678. goto gpd_error_out;
  1679. }
  1680. /* Names are little-endian. */
  1681. memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
  1682. memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
  1683. /* Get port_id of device. */
  1684. fcport->d_id.b.domain = pd->port_id[0];
  1685. fcport->d_id.b.area = pd->port_id[3];
  1686. fcport->d_id.b.al_pa = pd->port_id[2];
  1687. fcport->d_id.b.rsvd_1 = 0;
  1688. /* If not target must be initiator or unknown type. */
  1689. if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
  1690. fcport->port_type = FCT_INITIATOR;
  1691. else
  1692. fcport->port_type = FCT_TARGET;
  1693. /* Passback COS information. */
  1694. fcport->supported_classes = (pd->options & BIT_4) ?
  1695. FC_COS_CLASS2: FC_COS_CLASS3;
  1696. }
  1697. gpd_error_out:
  1698. dma_pool_free(ha->s_dma_pool, pd, pd_dma);
  1699. if (rval != QLA_SUCCESS) {
  1700. ql_dbg(ql_dbg_mbx, vha, 0x1052,
  1701. "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
  1702. mcp->mb[0], mcp->mb[1]);
  1703. } else {
  1704. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
  1705. "Done %s.\n", __func__);
  1706. }
  1707. return rval;
  1708. }
  1709. /*
  1710. * qla2x00_get_firmware_state
  1711. * Get adapter firmware state.
  1712. *
  1713. * Input:
  1714. * ha = adapter block pointer.
  1715. * dptr = pointer for firmware state.
  1716. * TARGET_QUEUE_LOCK must be released.
  1717. * ADAPTER_STATE_LOCK must be released.
  1718. *
  1719. * Returns:
  1720. * qla2x00 local function return status code.
  1721. *
  1722. * Context:
  1723. * Kernel context.
  1724. */
  1725. int
  1726. qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
  1727. {
  1728. int rval;
  1729. mbx_cmd_t mc;
  1730. mbx_cmd_t *mcp = &mc;
  1731. struct qla_hw_data *ha = vha->hw;
  1732. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
  1733. "Entered %s.\n", __func__);
  1734. mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
  1735. mcp->out_mb = MBX_0;
  1736. if (IS_FWI2_CAPABLE(vha->hw))
  1737. mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  1738. else
  1739. mcp->in_mb = MBX_1|MBX_0;
  1740. mcp->tov = MBX_TOV_SECONDS;
  1741. mcp->flags = 0;
  1742. rval = qla2x00_mailbox_command(vha, mcp);
  1743. /* Return firmware states. */
  1744. states[0] = mcp->mb[1];
  1745. if (IS_FWI2_CAPABLE(vha->hw)) {
  1746. states[1] = mcp->mb[2];
  1747. states[2] = mcp->mb[3]; /* SFP info */
  1748. states[3] = mcp->mb[4];
  1749. states[4] = mcp->mb[5];
  1750. states[5] = mcp->mb[6]; /* DPORT status */
  1751. }
  1752. if (rval != QLA_SUCCESS) {
  1753. /*EMPTY*/
  1754. ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
  1755. } else {
  1756. if (IS_QLA27XX(ha)) {
  1757. if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
  1758. ql_dbg(ql_dbg_mbx, vha, 0x119e,
  1759. "Invalid SFP/Validation Failed\n");
  1760. }
  1761. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
  1762. "Done %s.\n", __func__);
  1763. }
  1764. return rval;
  1765. }
  1766. /*
  1767. * qla2x00_get_port_name
  1768. * Issue get port name mailbox command.
  1769. * Returned name is in big endian format.
  1770. *
  1771. * Input:
  1772. * ha = adapter block pointer.
  1773. * loop_id = loop ID of device.
  1774. * name = pointer for name.
  1775. * TARGET_QUEUE_LOCK must be released.
  1776. * ADAPTER_STATE_LOCK must be released.
  1777. *
  1778. * Returns:
  1779. * qla2x00 local function return status code.
  1780. *
  1781. * Context:
  1782. * Kernel context.
  1783. */
  1784. int
  1785. qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
  1786. uint8_t opt)
  1787. {
  1788. int rval;
  1789. mbx_cmd_t mc;
  1790. mbx_cmd_t *mcp = &mc;
  1791. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
  1792. "Entered %s.\n", __func__);
  1793. mcp->mb[0] = MBC_GET_PORT_NAME;
  1794. mcp->mb[9] = vha->vp_idx;
  1795. mcp->out_mb = MBX_9|MBX_1|MBX_0;
  1796. if (HAS_EXTENDED_IDS(vha->hw)) {
  1797. mcp->mb[1] = loop_id;
  1798. mcp->mb[10] = opt;
  1799. mcp->out_mb |= MBX_10;
  1800. } else {
  1801. mcp->mb[1] = loop_id << 8 | opt;
  1802. }
  1803. mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1804. mcp->tov = MBX_TOV_SECONDS;
  1805. mcp->flags = 0;
  1806. rval = qla2x00_mailbox_command(vha, mcp);
  1807. if (rval != QLA_SUCCESS) {
  1808. /*EMPTY*/
  1809. ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
  1810. } else {
  1811. if (name != NULL) {
  1812. /* This function returns name in big endian. */
  1813. name[0] = MSB(mcp->mb[2]);
  1814. name[1] = LSB(mcp->mb[2]);
  1815. name[2] = MSB(mcp->mb[3]);
  1816. name[3] = LSB(mcp->mb[3]);
  1817. name[4] = MSB(mcp->mb[6]);
  1818. name[5] = LSB(mcp->mb[6]);
  1819. name[6] = MSB(mcp->mb[7]);
  1820. name[7] = LSB(mcp->mb[7]);
  1821. }
  1822. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
  1823. "Done %s.\n", __func__);
  1824. }
  1825. return rval;
  1826. }
  1827. /*
 * qla24xx_link_initialize
  1829. * Issue link initialization mailbox command.
  1830. *
  1831. * Input:
  1832. * ha = adapter block pointer.
  1833. * TARGET_QUEUE_LOCK must be released.
  1834. * ADAPTER_STATE_LOCK must be released.
  1835. *
  1836. * Returns:
  1837. * qla2x00 local function return status code.
  1838. *
  1839. * Context:
  1840. * Kernel context.
  1841. */
  1842. int
  1843. qla24xx_link_initialize(scsi_qla_host_t *vha)
  1844. {
  1845. int rval;
  1846. mbx_cmd_t mc;
  1847. mbx_cmd_t *mcp = &mc;
  1848. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
  1849. "Entered %s.\n", __func__);
  1850. if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
  1851. return QLA_FUNCTION_FAILED;
  1852. mcp->mb[0] = MBC_LINK_INITIALIZATION;
  1853. mcp->mb[1] = BIT_4;
  1854. if (vha->hw->operating_mode == LOOP)
  1855. mcp->mb[1] |= BIT_6;
  1856. else
  1857. mcp->mb[1] |= BIT_5;
  1858. mcp->mb[2] = 0;
  1859. mcp->mb[3] = 0;
  1860. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1861. mcp->in_mb = MBX_0;
  1862. mcp->tov = MBX_TOV_SECONDS;
  1863. mcp->flags = 0;
  1864. rval = qla2x00_mailbox_command(vha, mcp);
  1865. if (rval != QLA_SUCCESS) {
  1866. ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
  1867. } else {
  1868. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
  1869. "Done %s.\n", __func__);
  1870. }
  1871. return rval;
  1872. }
  1873. /*
  1874. * qla2x00_lip_reset
  1875. * Issue LIP reset mailbox command.
  1876. *
  1877. * Input:
  1878. * ha = adapter block pointer.
  1879. * TARGET_QUEUE_LOCK must be released.
  1880. * ADAPTER_STATE_LOCK must be released.
  1881. *
  1882. * Returns:
  1883. * qla2x00 local function return status code.
  1884. *
  1885. * Context:
  1886. * Kernel context.
  1887. */
  1888. int
  1889. qla2x00_lip_reset(scsi_qla_host_t *vha)
  1890. {
  1891. int rval;
  1892. mbx_cmd_t mc;
  1893. mbx_cmd_t *mcp = &mc;
  1894. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
  1895. "Entered %s.\n", __func__);
  1896. if (IS_CNA_CAPABLE(vha->hw)) {
  1897. /* Logout across all FCFs. */
  1898. mcp->mb[0] = MBC_LIP_FULL_LOGIN;
  1899. mcp->mb[1] = BIT_1;
  1900. mcp->mb[2] = 0;
  1901. mcp->out_mb = MBX_2|MBX_1|MBX_0;
  1902. } else if (IS_FWI2_CAPABLE(vha->hw)) {
  1903. mcp->mb[0] = MBC_LIP_FULL_LOGIN;
  1904. mcp->mb[1] = BIT_6;
  1905. mcp->mb[2] = 0;
  1906. mcp->mb[3] = vha->hw->loop_reset_delay;
  1907. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1908. } else {
  1909. mcp->mb[0] = MBC_LIP_RESET;
  1910. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  1911. if (HAS_EXTENDED_IDS(vha->hw)) {
  1912. mcp->mb[1] = 0x00ff;
  1913. mcp->mb[10] = 0;
  1914. mcp->out_mb |= MBX_10;
  1915. } else {
  1916. mcp->mb[1] = 0xff00;
  1917. }
  1918. mcp->mb[2] = vha->hw->loop_reset_delay;
  1919. mcp->mb[3] = 0;
  1920. }
  1921. mcp->in_mb = MBX_0;
  1922. mcp->tov = MBX_TOV_SECONDS;
  1923. mcp->flags = 0;
  1924. rval = qla2x00_mailbox_command(vha, mcp);
  1925. if (rval != QLA_SUCCESS) {
  1926. /*EMPTY*/
  1927. ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
  1928. } else {
  1929. /*EMPTY*/
  1930. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
  1931. "Done %s.\n", __func__);
  1932. }
  1933. return rval;
  1934. }
  1935. /*
  1936. * qla2x00_send_sns
  1937. * Send SNS command.
  1938. *
  1939. * Input:
  1940. * ha = adapter block pointer.
  1941. * sns = pointer for command.
  1942. * cmd_size = command size.
  1943. * buf_size = response/command size.
  1944. * TARGET_QUEUE_LOCK must be released.
  1945. * ADAPTER_STATE_LOCK must be released.
  1946. *
  1947. * Returns:
  1948. * qla2x00 local function return status code.
  1949. *
  1950. * Context:
  1951. * Kernel context.
  1952. */
  1953. int
  1954. qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
  1955. uint16_t cmd_size, size_t buf_size)
  1956. {
  1957. int rval;
  1958. mbx_cmd_t mc;
  1959. mbx_cmd_t *mcp = &mc;
  1960. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
  1961. "Entered %s.\n", __func__);
  1962. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
  1963. "Retry cnt=%d ratov=%d total tov=%d.\n",
vha->hw->retry_count, vha->hw->login_timeout,
(vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2));
  1965. mcp->mb[0] = MBC_SEND_SNS_COMMAND;
  1966. mcp->mb[1] = cmd_size;
  1967. mcp->mb[2] = MSW(sns_phys_address);
  1968. mcp->mb[3] = LSW(sns_phys_address);
  1969. mcp->mb[6] = MSW(MSD(sns_phys_address));
  1970. mcp->mb[7] = LSW(MSD(sns_phys_address));
  1971. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  1972. mcp->in_mb = MBX_0|MBX_1;
  1973. mcp->buf_size = buf_size;
  1974. mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
  1975. mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
  1976. rval = qla2x00_mailbox_command(vha, mcp);
  1977. if (rval != QLA_SUCCESS) {
  1978. /*EMPTY*/
  1979. ql_dbg(ql_dbg_mbx, vha, 0x105f,
  1980. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  1981. rval, mcp->mb[0], mcp->mb[1]);
  1982. } else {
  1983. /*EMPTY*/
  1984. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
  1985. "Done %s.\n", __func__);
  1986. }
  1987. return rval;
  1988. }
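/*
 * qla24xx_login_fabric
 * Issue a fabric login (PLOGI) via a Login/Logout Port IOCB and map
 * the IOCB completion onto legacy mailbox-style status values in mb[].
 *
 * Input:
 * vha = adapter state pointer.
 * loop_id = device loop ID.
 * domain/area/al_pa = device port ID.
 * mb = pointer for returned (emulated) mailbox statuses.
 * opt = command options (BIT_0 = conditional PLOGI, BIT_1 = skip PRLI).
 *
 * Returns:
 * qla2x00 local function return status code.
 *
 * Context:
 * Kernel context.
 */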
  1989. int
  1990. qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  1991. uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
  1992. {
  1993. int rval;
  1994. struct logio_entry_24xx *lg;
  1995. dma_addr_t lg_dma;
  1996. uint32_t iop[2];
  1997. struct qla_hw_data *ha = vha->hw;
  1998. struct req_que *req;
  1999. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
  2000. "Entered %s.\n", __func__);
  2001. if (vha->vp_idx && vha->qpair)
  2002. req = vha->qpair->req;
  2003. else
  2004. req = ha->req_q_map[0];
  2005. lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
  2006. if (lg == NULL) {
  2007. ql_log(ql_log_warn, vha, 0x1062,
  2008. "Failed to allocate login IOCB.\n");
  2009. return QLA_MEMORY_ALLOC_FAILED;
  2010. }
  2011. memset(lg, 0, sizeof(struct logio_entry_24xx));
  2012. lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
  2013. lg->entry_count = 1;
  2014. lg->handle = MAKE_HANDLE(req->id, lg->handle);
  2015. lg->nport_handle = cpu_to_le16(loop_id);
  2016. lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
  2017. if (opt & BIT_0)
  2018. lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
  2019. if (opt & BIT_1)
  2020. lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
  2021. lg->port_id[0] = al_pa;
  2022. lg->port_id[1] = area;
  2023. lg->port_id[2] = domain;
  2024. lg->vp_index = vha->vp_idx;
  2025. rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
  2026. (ha->r_a_tov / 10 * 2) + 2);
  2027. if (rval != QLA_SUCCESS) {
  2028. ql_dbg(ql_dbg_mbx, vha, 0x1063,
  2029. "Failed to issue login IOCB (%x).\n", rval);
  2030. } else if (lg->entry_status != 0) {
  2031. ql_dbg(ql_dbg_mbx, vha, 0x1064,
  2032. "Failed to complete IOCB -- error status (%x).\n",
  2033. lg->entry_status);
  2034. rval = QLA_FUNCTION_FAILED;
  2035. } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
  2036. iop[0] = le32_to_cpu(lg->io_parameter[0]);
  2037. iop[1] = le32_to_cpu(lg->io_parameter[1]);
  2038. ql_dbg(ql_dbg_mbx, vha, 0x1065,
  2039. "Failed to complete IOCB -- completion status (%x) "
  2040. "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
  2041. iop[0], iop[1]);
  2042. switch (iop[0]) {
  2043. case LSC_SCODE_PORTID_USED:
  2044. mb[0] = MBS_PORT_ID_USED;
  2045. mb[1] = LSW(iop[1]);
  2046. break;
  2047. case LSC_SCODE_NPORT_USED:
  2048. mb[0] = MBS_LOOP_ID_USED;
  2049. break;
  2050. case LSC_SCODE_NOLINK:
  2051. case LSC_SCODE_NOIOCB:
  2052. case LSC_SCODE_NOXCB:
  2053. case LSC_SCODE_CMD_FAILED:
  2054. case LSC_SCODE_NOFABRIC:
  2055. case LSC_SCODE_FW_NOT_READY:
  2056. case LSC_SCODE_NOT_LOGGED_IN:
  2057. case LSC_SCODE_NOPCB:
  2058. case LSC_SCODE_ELS_REJECT:
  2059. case LSC_SCODE_CMD_PARAM_ERR:
  2060. case LSC_SCODE_NONPORT:
  2061. case LSC_SCODE_LOGGED_IN:
  2062. case LSC_SCODE_NOFLOGI_ACC:
  2063. default:
  2064. mb[0] = MBS_COMMAND_ERROR;
  2065. break;
  2066. }
  2067. } else {
  2068. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
  2069. "Done %s.\n", __func__);
  2070. iop[0] = le32_to_cpu(lg->io_parameter[0]);
  2071. mb[0] = MBS_COMMAND_COMPLETE;
  2072. mb[1] = 0;
  2073. if (iop[0] & BIT_4) {
  2074. if (iop[0] & BIT_8)
  2075. mb[1] |= BIT_1;
  2076. } else
  2077. mb[1] = BIT_0;
  2078. /* Passback COS information. */
  2079. mb[10] = 0;
  2080. if (lg->io_parameter[7] || lg->io_parameter[8])
  2081. mb[10] |= BIT_0; /* Class 2. */
  2082. if (lg->io_parameter[9] || lg->io_parameter[10])
  2083. mb[10] |= BIT_1; /* Class 3. */
  2084. if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
  2085. mb[10] |= BIT_7; /* Confirmed Completion
  2086. * Allowed
  2087. */
  2088. }
  2089. dma_pool_free(ha->s_dma_pool, lg, lg_dma);
  2090. return rval;
  2091. }
  2092. /*
  2093. * qla2x00_login_fabric
  2094. * Issue login fabric port mailbox command.
  2095. *
  2096. * Input:
  2097. * ha = adapter block pointer.
  2098. * loop_id = device loop ID.
  2099. * domain = device domain.
  2100. * area = device area.
  2101. * al_pa = device AL_PA.
  2102. * status = pointer for return status.
  2103. * opt = command options.
  2104. * TARGET_QUEUE_LOCK must be released.
  2105. * ADAPTER_STATE_LOCK must be released.
  2106. *
  2107. * Returns:
  2108. * qla2x00 local function return status code.
  2109. *
  2110. * Context:
  2111. * Kernel context.
  2112. */
  2113. int
  2114. qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  2115. uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
  2116. {
  2117. int rval;
  2118. mbx_cmd_t mc;
  2119. mbx_cmd_t *mcp = &mc;
  2120. struct qla_hw_data *ha = vha->hw;
  2121. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
  2122. "Entered %s.\n", __func__);
  2123. mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
  2124. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  2125. if (HAS_EXTENDED_IDS(ha)) {
  2126. mcp->mb[1] = loop_id;
  2127. mcp->mb[10] = opt;
  2128. mcp->out_mb |= MBX_10;
  2129. } else {
  2130. mcp->mb[1] = (loop_id << 8) | opt;
  2131. }
  2132. mcp->mb[2] = domain;
  2133. mcp->mb[3] = area << 8 | al_pa;
  2134. mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
  2135. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  2136. mcp->flags = 0;
  2137. rval = qla2x00_mailbox_command(vha, mcp);
  2138. /* Return mailbox statuses. */
  2139. if (mb != NULL) {
  2140. mb[0] = mcp->mb[0];
  2141. mb[1] = mcp->mb[1];
  2142. mb[2] = mcp->mb[2];
  2143. mb[6] = mcp->mb[6];
  2144. mb[7] = mcp->mb[7];
  2145. /* COS retrieved from Get-Port-Database mailbox command. */
  2146. mb[10] = 0;
  2147. }
  2148. if (rval != QLA_SUCCESS) {
  2149. /* RLU tmp code: need to change main mailbox_command function to
  2150. * return ok even when the mailbox completion value is not
 * SUCCESS. The caller is responsible for interpreting the
 * return values of this mailbox command if we're not to
 * change too much of the existing code.
  2154. */
  2155. if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
  2156. mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
  2157. mcp->mb[0] == 0x4006)
  2158. rval = QLA_SUCCESS;
  2159. /*EMPTY*/
  2160. ql_dbg(ql_dbg_mbx, vha, 0x1068,
  2161. "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
  2162. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
  2163. } else {
  2164. /*EMPTY*/
  2165. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
  2166. "Done %s.\n", __func__);
  2167. }
  2168. return rval;
  2169. }
  2170. /*
  2171. * qla2x00_login_local_device
  2172. * Issue login loop port mailbox command.
  2173. *
  2174. * Input:
  2175. * ha = adapter block pointer.
  2176. * loop_id = device loop ID.
  2177. * opt = command options.
  2178. *
  2179. * Returns:
  2180. * Return status code.
  2181. *
  2182. * Context:
  2183. * Kernel context.
  2184. *
  2185. */
  2186. int
  2187. qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
  2188. uint16_t *mb_ret, uint8_t opt)
  2189. {
  2190. int rval;
  2191. mbx_cmd_t mc;
  2192. mbx_cmd_t *mcp = &mc;
  2193. struct qla_hw_data *ha = vha->hw;
  2194. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
  2195. "Entered %s.\n", __func__);
  2196. if (IS_FWI2_CAPABLE(ha))
  2197. return qla24xx_login_fabric(vha, fcport->loop_id,
  2198. fcport->d_id.b.domain, fcport->d_id.b.area,
  2199. fcport->d_id.b.al_pa, mb_ret, opt);
  2200. mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
  2201. if (HAS_EXTENDED_IDS(ha))
  2202. mcp->mb[1] = fcport->loop_id;
  2203. else
  2204. mcp->mb[1] = fcport->loop_id << 8;
  2205. mcp->mb[2] = opt;
  2206. mcp->out_mb = MBX_2|MBX_1|MBX_0;
  2207. mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
  2208. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  2209. mcp->flags = 0;
  2210. rval = qla2x00_mailbox_command(vha, mcp);
  2211. /* Return mailbox statuses. */
  2212. if (mb_ret != NULL) {
  2213. mb_ret[0] = mcp->mb[0];
  2214. mb_ret[1] = mcp->mb[1];
  2215. mb_ret[6] = mcp->mb[6];
  2216. mb_ret[7] = mcp->mb[7];
  2217. }
  2218. if (rval != QLA_SUCCESS) {
  2219. /* AV tmp code: need to change main mailbox_command function to
  2220. * return ok even when the mailbox completion value is not
 * SUCCESS. The caller is responsible for interpreting the
 * return values of this mailbox command if we're not to
 * change too much of the existing code.
  2224. */
  2225. if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
  2226. rval = QLA_SUCCESS;
  2227. ql_dbg(ql_dbg_mbx, vha, 0x106b,
  2228. "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
  2229. rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
  2230. } else {
  2231. /*EMPTY*/
  2232. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
  2233. "Done %s.\n", __func__);
  2234. }
  2235. return (rval);
  2236. }
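/*
 * qla24xx_fabric_logout
 * Issue an implicit fabric logout (LOGO) via a Login/Logout Port IOCB
 * and free the N_Port handle.
 *
 * Input:
 * vha = adapter state pointer.
 * loop_id = device loop ID.
 * domain/area/al_pa = device port ID.
 *
 * Returns:
 * qla2x00 local function return status code.
 *
 * Context:
 * Kernel context.
 */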
  2237. int
  2238. qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  2239. uint8_t area, uint8_t al_pa)
  2240. {
  2241. int rval;
  2242. struct logio_entry_24xx *lg;
  2243. dma_addr_t lg_dma;
  2244. struct qla_hw_data *ha = vha->hw;
  2245. struct req_que *req;
  2246. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
  2247. "Entered %s.\n", __func__);
  2248. lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
  2249. if (lg == NULL) {
  2250. ql_log(ql_log_warn, vha, 0x106e,
  2251. "Failed to allocate logout IOCB.\n");
  2252. return QLA_MEMORY_ALLOC_FAILED;
  2253. }
  2254. memset(lg, 0, sizeof(struct logio_entry_24xx));
  2255. req = vha->req;
  2256. lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
  2257. lg->entry_count = 1;
  2258. lg->handle = MAKE_HANDLE(req->id, lg->handle);
  2259. lg->nport_handle = cpu_to_le16(loop_id);
  2260. lg->control_flags =
  2261. cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
  2262. LCF_FREE_NPORT);
  2263. lg->port_id[0] = al_pa;
  2264. lg->port_id[1] = area;
  2265. lg->port_id[2] = domain;
  2266. lg->vp_index = vha->vp_idx;
  2267. rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
  2268. (ha->r_a_tov / 10 * 2) + 2);
  2269. if (rval != QLA_SUCCESS) {
  2270. ql_dbg(ql_dbg_mbx, vha, 0x106f,
  2271. "Failed to issue logout IOCB (%x).\n", rval);
  2272. } else if (lg->entry_status != 0) {
  2273. ql_dbg(ql_dbg_mbx, vha, 0x1070,
  2274. "Failed to complete IOCB -- error status (%x).\n",
  2275. lg->entry_status);
  2276. rval = QLA_FUNCTION_FAILED;
  2277. } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
  2278. ql_dbg(ql_dbg_mbx, vha, 0x1071,
  2279. "Failed to complete IOCB -- completion status (%x) "
  2280. "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
  2281. le32_to_cpu(lg->io_parameter[0]),
  2282. le32_to_cpu(lg->io_parameter[1]));
  2283. } else {
  2284. /*EMPTY*/
  2285. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
  2286. "Done %s.\n", __func__);
  2287. }
  2288. dma_pool_free(ha->s_dma_pool, lg, lg_dma);
  2289. return rval;
  2290. }
  2291. /*
  2292. * qla2x00_fabric_logout
  2293. * Issue logout fabric port mailbox command.
  2294. *
  2295. * Input:
  2296. * ha = adapter block pointer.
  2297. * loop_id = device loop ID.
  2298. * TARGET_QUEUE_LOCK must be released.
  2299. * ADAPTER_STATE_LOCK must be released.
  2300. *
  2301. * Returns:
  2302. * qla2x00 local function return status code.
  2303. *
  2304. * Context:
  2305. * Kernel context.
  2306. */
  2307. int
  2308. qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
  2309. uint8_t area, uint8_t al_pa)
  2310. {
  2311. int rval;
  2312. mbx_cmd_t mc;
  2313. mbx_cmd_t *mcp = &mc;
  2314. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
  2315. "Entered %s.\n", __func__);
  2316. mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
  2317. mcp->out_mb = MBX_1|MBX_0;
  2318. if (HAS_EXTENDED_IDS(vha->hw)) {
  2319. mcp->mb[1] = loop_id;
  2320. mcp->mb[10] = 0;
  2321. mcp->out_mb |= MBX_10;
  2322. } else {
  2323. mcp->mb[1] = loop_id << 8;
  2324. }
  2325. mcp->in_mb = MBX_1|MBX_0;
  2326. mcp->tov = MBX_TOV_SECONDS;
  2327. mcp->flags = 0;
  2328. rval = qla2x00_mailbox_command(vha, mcp);
  2329. if (rval != QLA_SUCCESS) {
  2330. /*EMPTY*/
  2331. ql_dbg(ql_dbg_mbx, vha, 0x1074,
  2332. "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
  2333. } else {
  2334. /*EMPTY*/
  2335. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
  2336. "Done %s.\n", __func__);
  2337. }
  2338. return rval;
  2339. }
  2340. /*
  2341. * qla2x00_full_login_lip
  2342. * Issue full login LIP mailbox command.
  2343. *
  2344. * Input:
  2345. * ha = adapter block pointer.
  2346. * TARGET_QUEUE_LOCK must be released.
  2347. * ADAPTER_STATE_LOCK must be released.
  2348. *
  2349. * Returns:
  2350. * qla2x00 local function return status code.
  2351. *
  2352. * Context:
  2353. * Kernel context.
  2354. */
  2355. int
  2356. qla2x00_full_login_lip(scsi_qla_host_t *vha)
  2357. {
  2358. int rval;
  2359. mbx_cmd_t mc;
  2360. mbx_cmd_t *mcp = &mc;
  2361. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
  2362. "Entered %s.\n", __func__);
  2363. mcp->mb[0] = MBC_LIP_FULL_LOGIN;
  2364. mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
  2365. mcp->mb[2] = 0;
  2366. mcp->mb[3] = 0;
  2367. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  2368. mcp->in_mb = MBX_0;
  2369. mcp->tov = MBX_TOV_SECONDS;
  2370. mcp->flags = 0;
  2371. rval = qla2x00_mailbox_command(vha, mcp);
  2372. if (rval != QLA_SUCCESS) {
  2373. /*EMPTY*/
  2374. ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
  2375. } else {
  2376. /*EMPTY*/
  2377. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
  2378. "Done %s.\n", __func__);
  2379. }
  2380. return rval;
  2381. }
  2382. /*
  2383. * qla2x00_get_id_list
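 * Get the ID list (loop ID and port ID of each port known to the firmware).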
  2384. *
  2385. * Input:
  2386. * ha = adapter block pointer.
  2387. *
  2388. * Returns:
  2389. * qla2x00 local function return status code.
  2390. *
  2391. * Context:
  2392. * Kernel context.
  2393. */
  2394. int
  2395. qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
  2396. uint16_t *entries)
  2397. {
  2398. int rval;
  2399. mbx_cmd_t mc;
  2400. mbx_cmd_t *mcp = &mc;
  2401. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
  2402. "Entered %s.\n", __func__);
  2403. if (id_list == NULL)
  2404. return QLA_FUNCTION_FAILED;
  2405. mcp->mb[0] = MBC_GET_ID_LIST;
  2406. mcp->out_mb = MBX_0;
  2407. if (IS_FWI2_CAPABLE(vha->hw)) {
  2408. mcp->mb[2] = MSW(id_list_dma);
  2409. mcp->mb[3] = LSW(id_list_dma);
  2410. mcp->mb[6] = MSW(MSD(id_list_dma));
  2411. mcp->mb[7] = LSW(MSD(id_list_dma));
  2412. mcp->mb[8] = 0;
  2413. mcp->mb[9] = vha->vp_idx;
  2414. mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
  2415. } else {
  2416. mcp->mb[1] = MSW(id_list_dma);
  2417. mcp->mb[2] = LSW(id_list_dma);
  2418. mcp->mb[3] = MSW(MSD(id_list_dma));
  2419. mcp->mb[6] = LSW(MSD(id_list_dma));
  2420. mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
  2421. }
  2422. mcp->in_mb = MBX_1|MBX_0;
  2423. mcp->tov = MBX_TOV_SECONDS;
  2424. mcp->flags = 0;
  2425. rval = qla2x00_mailbox_command(vha, mcp);
  2426. if (rval != QLA_SUCCESS) {
  2427. /*EMPTY*/
  2428. ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
  2429. } else {
  2430. *entries = mcp->mb[1];
  2431. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
  2432. "Done %s.\n", __func__);
  2433. }
  2434. return rval;
  2435. }
  2436. /*
  2437. * qla2x00_get_resource_cnts
  2438. * Get current firmware resource counts.
  2439. *
  2440. * Input:
  2441. * ha = adapter block pointer.
  2442. *
  2443. * Returns:
  2444. * qla2x00 local function return status code.
  2445. *
  2446. * Context:
  2447. * Kernel context.
  2448. */
  2449. int
  2450. qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
  2451. {
  2452. struct qla_hw_data *ha = vha->hw;
  2453. int rval;
  2454. mbx_cmd_t mc;
  2455. mbx_cmd_t *mcp = &mc;
  2456. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
  2457. "Entered %s.\n", __func__);
  2458. mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
  2459. mcp->out_mb = MBX_0;
  2460. mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  2461. if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
  2462. mcp->in_mb |= MBX_12;
  2463. mcp->tov = MBX_TOV_SECONDS;
  2464. mcp->flags = 0;
  2465. rval = qla2x00_mailbox_command(vha, mcp);
  2466. if (rval != QLA_SUCCESS) {
  2467. /*EMPTY*/
  2468. ql_dbg(ql_dbg_mbx, vha, 0x107d,
  2469. "Failed mb[0]=%x.\n", mcp->mb[0]);
  2470. } else {
  2471. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
  2472. "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
  2473. "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
  2474. mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
  2475. mcp->mb[11], mcp->mb[12]);
  2476. ha->orig_fw_tgt_xcb_count = mcp->mb[1];
  2477. ha->cur_fw_tgt_xcb_count = mcp->mb[2];
  2478. ha->cur_fw_xcb_count = mcp->mb[3];
  2479. ha->orig_fw_xcb_count = mcp->mb[6];
  2480. ha->cur_fw_iocb_count = mcp->mb[7];
  2481. ha->orig_fw_iocb_count = mcp->mb[10];
  2482. if (ha->flags.npiv_supported)
  2483. ha->max_npiv_vports = mcp->mb[11];
  2484. if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
  2485. ha->fw_max_fcf_count = mcp->mb[12];
  2486. }
  2487. return (rval);
  2488. }
  2489. /*
  2490. * qla2x00_get_fcal_position_map
  2491. * Get FCAL (LILP) position map using mailbox command
  2492. *
  2493. * Input:
  2494. * ha = adapter state pointer.
  2495. * pos_map = buffer pointer (can be NULL).
  2496. *
  2497. * Returns:
  2498. * qla2x00 local function return status code.
  2499. *
  2500. * Context:
  2501. * Kernel context.
  2502. */
  2503. int
  2504. qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
  2505. {
  2506. int rval;
  2507. mbx_cmd_t mc;
  2508. mbx_cmd_t *mcp = &mc;
  2509. char *pmap;
  2510. dma_addr_t pmap_dma;
  2511. struct qla_hw_data *ha = vha->hw;
  2512. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
  2513. "Entered %s.\n", __func__);
  2514. pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
  2515. if (pmap == NULL) {
  2516. ql_log(ql_log_warn, vha, 0x1080,
  2517. "Memory alloc failed.\n");
  2518. return QLA_MEMORY_ALLOC_FAILED;
  2519. }
  2520. memset(pmap, 0, FCAL_MAP_SIZE);
  2521. mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
  2522. mcp->mb[2] = MSW(pmap_dma);
  2523. mcp->mb[3] = LSW(pmap_dma);
  2524. mcp->mb[6] = MSW(MSD(pmap_dma));
  2525. mcp->mb[7] = LSW(MSD(pmap_dma));
  2526. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
  2527. mcp->in_mb = MBX_1|MBX_0;
  2528. mcp->buf_size = FCAL_MAP_SIZE;
  2529. mcp->flags = MBX_DMA_IN;
  2530. mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
  2531. rval = qla2x00_mailbox_command(vha, mcp);
  2532. if (rval == QLA_SUCCESS) {
  2533. ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
  2534. "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
  2535. mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
  2536. ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
  2537. pmap, pmap[0] + 1);
  2538. if (pos_map)
  2539. memcpy(pos_map, pmap, FCAL_MAP_SIZE);
  2540. }
  2541. dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
  2542. if (rval != QLA_SUCCESS) {
  2543. ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
  2544. } else {
  2545. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
  2546. "Done %s.\n", __func__);
  2547. }
  2548. return rval;
  2549. }
  2550. /*
  2551. * qla2x00_get_link_status
  2552. *
  2553. * Input:
  2554. * ha = adapter block pointer.
  2555. * loop_id = device loop ID.
  2556. * ret_buf = pointer to link status return buffer.
  2557. *
  2558. * Returns:
  2559. * 0 = success.
  2560. * BIT_0 = mem alloc error.
  2561. * BIT_1 = mailbox error.
  2562. */
  2563. int
  2564. qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
  2565. struct link_statistics *stats, dma_addr_t stats_dma)
  2566. {
  2567. int rval;
  2568. mbx_cmd_t mc;
  2569. mbx_cmd_t *mcp = &mc;
  2570. uint32_t *iter = (void *)stats;
  2571. ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
  2572. struct qla_hw_data *ha = vha->hw;
  2573. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
  2574. "Entered %s.\n", __func__);
  2575. mcp->mb[0] = MBC_GET_LINK_STATUS;
  2576. mcp->mb[2] = MSW(LSD(stats_dma));
  2577. mcp->mb[3] = LSW(LSD(stats_dma));
  2578. mcp->mb[6] = MSW(MSD(stats_dma));
  2579. mcp->mb[7] = LSW(MSD(stats_dma));
  2580. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
  2581. mcp->in_mb = MBX_0;
  2582. if (IS_FWI2_CAPABLE(ha)) {
  2583. mcp->mb[1] = loop_id;
  2584. mcp->mb[4] = 0;
  2585. mcp->mb[10] = 0;
  2586. mcp->out_mb |= MBX_10|MBX_4|MBX_1;
  2587. mcp->in_mb |= MBX_1;
  2588. } else if (HAS_EXTENDED_IDS(ha)) {
  2589. mcp->mb[1] = loop_id;
  2590. mcp->mb[10] = 0;
  2591. mcp->out_mb |= MBX_10|MBX_1;
  2592. } else {
  2593. mcp->mb[1] = loop_id << 8;
  2594. mcp->out_mb |= MBX_1;
  2595. }
  2596. mcp->tov = MBX_TOV_SECONDS;
  2597. mcp->flags = IOCTL_CMD;
  2598. rval = qla2x00_mailbox_command(vha, mcp);
  2599. if (rval == QLA_SUCCESS) {
  2600. if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
  2601. ql_dbg(ql_dbg_mbx, vha, 0x1085,
  2602. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2603. rval = QLA_FUNCTION_FAILED;
  2604. } else {
  2605. /* Re-endianize - firmware data is le32. */
  2606. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
  2607. "Done %s.\n", __func__);
  2608. for ( ; dwords--; iter++)
  2609. le32_to_cpus(iter);
  2610. }
  2611. } else {
  2612. /* Failed. */
  2613. ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
  2614. }
  2615. return rval;
  2616. }
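/*
 * qla24xx_get_isp_stats
 * Retrieve ISP link/private statistics into a caller-supplied DMA
 * buffer and convert the little-endian firmware data to host order.
 *
 * Input:
 * vha = adapter state pointer.
 * stats = statistics buffer pointer.
 * stats_dma = DMA address of the statistics buffer.
 * options = command options.
 *
 * Returns:
 * qla2x00 local function return status code.
 *
 * Context:
 * Kernel context.
 */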
  2617. int
  2618. qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
  2619. dma_addr_t stats_dma, uint16_t options)
  2620. {
  2621. int rval;
  2622. mbx_cmd_t mc;
  2623. mbx_cmd_t *mcp = &mc;
  2624. uint32_t *iter, dwords;
  2625. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
  2626. "Entered %s.\n", __func__);
  2627. memset(&mc, 0, sizeof(mc));
  2628. mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
  2629. mc.mb[2] = MSW(stats_dma);
  2630. mc.mb[3] = LSW(stats_dma);
  2631. mc.mb[6] = MSW(MSD(stats_dma));
  2632. mc.mb[7] = LSW(MSD(stats_dma));
  2633. mc.mb[8] = sizeof(struct link_statistics) / 4;
  2634. mc.mb[9] = cpu_to_le16(vha->vp_idx);
  2635. mc.mb[10] = cpu_to_le16(options);
  2636. rval = qla24xx_send_mb_cmd(vha, &mc);
  2637. if (rval == QLA_SUCCESS) {
  2638. if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
  2639. ql_dbg(ql_dbg_mbx, vha, 0x1089,
  2640. "Failed mb[0]=%x.\n", mcp->mb[0]);
  2641. rval = QLA_FUNCTION_FAILED;
  2642. } else {
  2643. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
  2644. "Done %s.\n", __func__);
  2645. /* Re-endianize - firmware data is le32. */
  2646. dwords = sizeof(struct link_statistics) / 4;
  2647. iter = &stats->link_fail_cnt;
  2648. for ( ; dwords--; iter++)
  2649. le32_to_cpus(iter);
  2650. }
  2651. } else {
  2652. /* Failed. */
  2653. ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
  2654. }
  2655. return rval;
  2656. }
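/*
* qla24xx_abort_command
* Abort a previously issued command: locate its outstanding handle on the
* request queue and issue an abort IOCB for it. When ql2xasynctmfenable is
* set, the asynchronous abort path is used instead.
*/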
  2657. int
  2658. qla24xx_abort_command(srb_t *sp)
  2659. {
  2660. int rval;
  2661. unsigned long flags = 0;
  2662. struct abort_entry_24xx *abt;
  2663. dma_addr_t abt_dma;
  2664. uint32_t handle;
  2665. fc_port_t *fcport = sp->fcport;
  2666. struct scsi_qla_host *vha = fcport->vha;
  2667. struct qla_hw_data *ha = vha->hw;
  2668. struct req_que *req = vha->req;
  2669. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
  2670. "Entered %s.\n", __func__);
  2671. if (sp->qpair)
  2672. req = sp->qpair->req;
  2673. if (ql2xasynctmfenable)
  2674. return qla24xx_async_abort_command(sp);
  2675. spin_lock_irqsave(&ha->hardware_lock, flags);
  2676. for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
  2677. if (req->outstanding_cmds[handle] == sp)
  2678. break;
  2679. }
  2680. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  2681. if (handle == req->num_outstanding_cmds) {
  2682. /* Command not found. */
  2683. return QLA_FUNCTION_FAILED;
  2684. }
  2685. abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
  2686. if (abt == NULL) {
  2687. ql_log(ql_log_warn, vha, 0x108d,
  2688. "Failed to allocate abort IOCB.\n");
  2689. return QLA_MEMORY_ALLOC_FAILED;
  2690. }
  2691. memset(abt, 0, sizeof(struct abort_entry_24xx));
  2692. abt->entry_type = ABORT_IOCB_TYPE;
  2693. abt->entry_count = 1;
  2694. abt->handle = MAKE_HANDLE(req->id, abt->handle);
  2695. abt->nport_handle = cpu_to_le16(fcport->loop_id);
  2696. abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
  2697. abt->port_id[0] = fcport->d_id.b.al_pa;
  2698. abt->port_id[1] = fcport->d_id.b.area;
  2699. abt->port_id[2] = fcport->d_id.b.domain;
  2700. abt->vp_index = fcport->vha->vp_idx;
  2701. abt->req_que_no = cpu_to_le16(req->id);
  2702. rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
  2703. if (rval != QLA_SUCCESS) {
  2704. ql_dbg(ql_dbg_mbx, vha, 0x108e,
  2705. "Failed to issue IOCB (%x).\n", rval);
  2706. } else if (abt->entry_status != 0) {
  2707. ql_dbg(ql_dbg_mbx, vha, 0x108f,
  2708. "Failed to complete IOCB -- error status (%x).\n",
  2709. abt->entry_status);
  2710. rval = QLA_FUNCTION_FAILED;
  2711. } else if (abt->nport_handle != cpu_to_le16(0)) {
  2712. ql_dbg(ql_dbg_mbx, vha, 0x1090,
  2713. "Failed to complete IOCB -- completion status (%x).\n",
  2714. le16_to_cpu(abt->nport_handle));
if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
  2716. rval = QLA_FUNCTION_PARAMETER_ERROR;
  2717. else
  2718. rval = QLA_FUNCTION_FAILED;
  2719. } else {
  2720. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
  2721. "Done %s.\n", __func__);
  2722. }
  2723. dma_pool_free(ha->s_dma_pool, abt, abt_dma);
  2724. return rval;
  2725. }
  2726. struct tsk_mgmt_cmd {
  2727. union {
  2728. struct tsk_mgmt_entry tsk;
  2729. struct sts_entry_24xx sts;
  2730. } p;
  2731. };
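/*
* __qla24xx_issue_tmf
* Build and issue a task management IOCB (target or LUN reset) for the
* given fcport, validate the returned status IOCB, then issue a marker.
*/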
  2732. static int
  2733. __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
  2734. uint64_t l, int tag)
  2735. {
  2736. int rval, rval2;
  2737. struct tsk_mgmt_cmd *tsk;
  2738. struct sts_entry_24xx *sts;
  2739. dma_addr_t tsk_dma;
  2740. scsi_qla_host_t *vha;
  2741. struct qla_hw_data *ha;
  2742. struct req_que *req;
  2743. struct rsp_que *rsp;
  2744. struct qla_qpair *qpair;
  2745. vha = fcport->vha;
  2746. ha = vha->hw;
  2747. req = vha->req;
  2748. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
  2749. "Entered %s.\n", __func__);
  2750. if (vha->vp_idx && vha->qpair) {
  2751. /* NPIV port */
  2752. qpair = vha->qpair;
  2753. rsp = qpair->rsp;
  2754. req = qpair->req;
  2755. } else {
  2756. rsp = req->rsp;
  2757. }
  2758. tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
  2759. if (tsk == NULL) {
  2760. ql_log(ql_log_warn, vha, 0x1093,
  2761. "Failed to allocate task management IOCB.\n");
  2762. return QLA_MEMORY_ALLOC_FAILED;
  2763. }
  2764. memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
  2765. tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
  2766. tsk->p.tsk.entry_count = 1;
  2767. tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
  2768. tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
  2769. tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
  2770. tsk->p.tsk.control_flags = cpu_to_le32(type);
  2771. tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
  2772. tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
  2773. tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
  2774. tsk->p.tsk.vp_index = fcport->vha->vp_idx;
  2775. if (type == TCF_LUN_RESET) {
  2776. int_to_scsilun(l, &tsk->p.tsk.lun);
  2777. host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
  2778. sizeof(tsk->p.tsk.lun));
  2779. }
  2780. sts = &tsk->p.sts;
  2781. rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
  2782. if (rval != QLA_SUCCESS) {
  2783. ql_dbg(ql_dbg_mbx, vha, 0x1094,
  2784. "Failed to issue %s reset IOCB (%x).\n", name, rval);
  2785. } else if (sts->entry_status != 0) {
  2786. ql_dbg(ql_dbg_mbx, vha, 0x1095,
  2787. "Failed to complete IOCB -- error status (%x).\n",
  2788. sts->entry_status);
  2789. rval = QLA_FUNCTION_FAILED;
  2790. } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
  2791. ql_dbg(ql_dbg_mbx, vha, 0x1096,
  2792. "Failed to complete IOCB -- completion status (%x).\n",
  2793. le16_to_cpu(sts->comp_status));
  2794. rval = QLA_FUNCTION_FAILED;
  2795. } else if (le16_to_cpu(sts->scsi_status) &
  2796. SS_RESPONSE_INFO_LEN_VALID) {
  2797. if (le32_to_cpu(sts->rsp_data_len) < 4) {
  2798. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
  2799. "Ignoring inconsistent data length -- not enough "
  2800. "response info (%d).\n",
  2801. le32_to_cpu(sts->rsp_data_len));
  2802. } else if (sts->data[3]) {
  2803. ql_dbg(ql_dbg_mbx, vha, 0x1098,
  2804. "Failed to complete IOCB -- response (%x).\n",
  2805. sts->data[3]);
  2806. rval = QLA_FUNCTION_FAILED;
  2807. }
  2808. }
  2809. /* Issue marker IOCB. */
  2810. rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
  2811. type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
  2812. if (rval2 != QLA_SUCCESS) {
  2813. ql_dbg(ql_dbg_mbx, vha, 0x1099,
  2814. "Failed to issue marker IOCB (%x).\n", rval2);
  2815. } else {
  2816. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
  2817. "Done %s.\n", __func__);
  2818. }
  2819. dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
  2820. return rval;
  2821. }
  2822. int
  2823. qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
  2824. {
  2825. struct qla_hw_data *ha = fcport->vha->hw;
  2826. if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
  2827. return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
  2828. return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
  2829. }
  2830. int
  2831. qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
  2832. {
  2833. struct qla_hw_data *ha = fcport->vha->hw;
  2834. if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
  2835. return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
  2836. return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
  2837. }
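/*
* qla2x00_system_error
* Ask the firmware to generate a system error (used as a firmware dump
* aid); supported on ISP23xx and FWI2-capable adapters only.
*/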
  2838. int
  2839. qla2x00_system_error(scsi_qla_host_t *vha)
  2840. {
  2841. int rval;
  2842. mbx_cmd_t mc;
  2843. mbx_cmd_t *mcp = &mc;
  2844. struct qla_hw_data *ha = vha->hw;
  2845. if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
  2846. return QLA_FUNCTION_FAILED;
  2847. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
  2848. "Entered %s.\n", __func__);
  2849. mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
  2850. mcp->out_mb = MBX_0;
  2851. mcp->in_mb = MBX_0;
  2852. mcp->tov = 5;
  2853. mcp->flags = 0;
  2854. rval = qla2x00_mailbox_command(vha, mcp);
  2855. if (rval != QLA_SUCCESS) {
  2856. ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
  2857. } else {
  2858. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
  2859. "Done %s.\n", __func__);
  2860. }
  2861. return rval;
  2862. }
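/*
* qla2x00_write_serdes_word
* Write a 16-bit value to a SerDes register on ISP25xx/2031/27xx adapters
* (only the low byte is written on ISP2031).
*/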
  2863. int
  2864. qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
  2865. {
  2866. int rval;
  2867. mbx_cmd_t mc;
  2868. mbx_cmd_t *mcp = &mc;
  2869. if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
  2870. !IS_QLA27XX(vha->hw))
  2871. return QLA_FUNCTION_FAILED;
  2872. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
  2873. "Entered %s.\n", __func__);
  2874. mcp->mb[0] = MBC_WRITE_SERDES;
  2875. mcp->mb[1] = addr;
  2876. if (IS_QLA2031(vha->hw))
  2877. mcp->mb[2] = data & 0xff;
  2878. else
  2879. mcp->mb[2] = data;
  2880. mcp->mb[3] = 0;
  2881. mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
  2882. mcp->in_mb = MBX_0;
  2883. mcp->tov = MBX_TOV_SECONDS;
  2884. mcp->flags = 0;
  2885. rval = qla2x00_mailbox_command(vha, mcp);
  2886. if (rval != QLA_SUCCESS) {
  2887. ql_dbg(ql_dbg_mbx, vha, 0x1183,
  2888. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2889. } else {
  2890. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
  2891. "Done %s.\n", __func__);
  2892. }
  2893. return rval;
  2894. }
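/*
* qla2x00_read_serdes_word
* Read a 16-bit value from a SerDes register on ISP25xx/2031/27xx adapters
* (only the low byte is returned on ISP2031).
*/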
  2895. int
  2896. qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
  2897. {
  2898. int rval;
  2899. mbx_cmd_t mc;
  2900. mbx_cmd_t *mcp = &mc;
  2901. if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
  2902. !IS_QLA27XX(vha->hw))
  2903. return QLA_FUNCTION_FAILED;
  2904. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
  2905. "Entered %s.\n", __func__);
  2906. mcp->mb[0] = MBC_READ_SERDES;
  2907. mcp->mb[1] = addr;
  2908. mcp->mb[3] = 0;
  2909. mcp->out_mb = MBX_3|MBX_1|MBX_0;
  2910. mcp->in_mb = MBX_1|MBX_0;
  2911. mcp->tov = MBX_TOV_SECONDS;
  2912. mcp->flags = 0;
  2913. rval = qla2x00_mailbox_command(vha, mcp);
  2914. if (IS_QLA2031(vha->hw))
  2915. *data = mcp->mb[1] & 0xff;
  2916. else
  2917. *data = mcp->mb[1];
  2918. if (rval != QLA_SUCCESS) {
  2919. ql_dbg(ql_dbg_mbx, vha, 0x1186,
  2920. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2921. } else {
  2922. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
  2923. "Done %s.\n", __func__);
  2924. }
  2925. return rval;
  2926. }
  2927. int
  2928. qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
  2929. {
  2930. int rval;
  2931. mbx_cmd_t mc;
  2932. mbx_cmd_t *mcp = &mc;
  2933. if (!IS_QLA8044(vha->hw))
  2934. return QLA_FUNCTION_FAILED;
  2935. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
  2936. "Entered %s.\n", __func__);
  2937. mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
  2938. mcp->mb[1] = HCS_WRITE_SERDES;
  2939. mcp->mb[3] = LSW(addr);
  2940. mcp->mb[4] = MSW(addr);
  2941. mcp->mb[5] = LSW(data);
  2942. mcp->mb[6] = MSW(data);
  2943. mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
  2944. mcp->in_mb = MBX_0;
  2945. mcp->tov = MBX_TOV_SECONDS;
  2946. mcp->flags = 0;
  2947. rval = qla2x00_mailbox_command(vha, mcp);
  2948. if (rval != QLA_SUCCESS) {
  2949. ql_dbg(ql_dbg_mbx, vha, 0x11a1,
  2950. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2951. } else {
  2952. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
  2953. "Done %s.\n", __func__);
  2954. }
  2955. return rval;
  2956. }
  2957. int
  2958. qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
  2959. {
  2960. int rval;
  2961. mbx_cmd_t mc;
  2962. mbx_cmd_t *mcp = &mc;
  2963. if (!IS_QLA8044(vha->hw))
  2964. return QLA_FUNCTION_FAILED;
  2965. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
  2966. "Entered %s.\n", __func__);
  2967. mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
  2968. mcp->mb[1] = HCS_READ_SERDES;
  2969. mcp->mb[3] = LSW(addr);
  2970. mcp->mb[4] = MSW(addr);
  2971. mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
  2972. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  2973. mcp->tov = MBX_TOV_SECONDS;
  2974. mcp->flags = 0;
  2975. rval = qla2x00_mailbox_command(vha, mcp);
  2976. *data = mcp->mb[2] << 16 | mcp->mb[1];
  2977. if (rval != QLA_SUCCESS) {
  2978. ql_dbg(ql_dbg_mbx, vha, 0x118a,
  2979. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  2980. } else {
  2981. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
  2982. "Done %s.\n", __func__);
  2983. }
  2984. return rval;
  2985. }
/**
* qla2x00_set_serdes_params() - Set serial-link (SerDes) transmitter parameters.
* @vha: adapter block pointer
* @sw_em_1g: serial link parameter used for 1 Gb/s operation
* @sw_em_2g: serial link parameter used for 2 Gb/s operation
* @sw_em_4g: serial link parameter used for 4 Gb/s operation
*
* Returns qla2xxx local function return status code.
*/
  2992. int
  2993. qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
  2994. uint16_t sw_em_2g, uint16_t sw_em_4g)
  2995. {
  2996. int rval;
  2997. mbx_cmd_t mc;
  2998. mbx_cmd_t *mcp = &mc;
  2999. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
  3000. "Entered %s.\n", __func__);
  3001. mcp->mb[0] = MBC_SERDES_PARAMS;
  3002. mcp->mb[1] = BIT_0;
  3003. mcp->mb[2] = sw_em_1g | BIT_15;
  3004. mcp->mb[3] = sw_em_2g | BIT_15;
  3005. mcp->mb[4] = sw_em_4g | BIT_15;
  3006. mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3007. mcp->in_mb = MBX_0;
  3008. mcp->tov = MBX_TOV_SECONDS;
  3009. mcp->flags = 0;
  3010. rval = qla2x00_mailbox_command(vha, mcp);
  3011. if (rval != QLA_SUCCESS) {
  3012. /*EMPTY*/
  3013. ql_dbg(ql_dbg_mbx, vha, 0x109f,
  3014. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3015. } else {
  3016. /*EMPTY*/
  3017. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
  3018. "Done %s.\n", __func__);
  3019. }
  3020. return rval;
  3021. }
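/*
* qla2x00_stop_firmware
* Request that the firmware stop execution (FWI2-capable adapters only).
* Returns QLA_INVALID_COMMAND if the firmware rejects the mailbox command.
*/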
  3022. int
  3023. qla2x00_stop_firmware(scsi_qla_host_t *vha)
  3024. {
  3025. int rval;
  3026. mbx_cmd_t mc;
  3027. mbx_cmd_t *mcp = &mc;
  3028. if (!IS_FWI2_CAPABLE(vha->hw))
  3029. return QLA_FUNCTION_FAILED;
  3030. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
  3031. "Entered %s.\n", __func__);
  3032. mcp->mb[0] = MBC_STOP_FIRMWARE;
  3033. mcp->mb[1] = 0;
  3034. mcp->out_mb = MBX_1|MBX_0;
  3035. mcp->in_mb = MBX_0;
  3036. mcp->tov = 5;
  3037. mcp->flags = 0;
  3038. rval = qla2x00_mailbox_command(vha, mcp);
  3039. if (rval != QLA_SUCCESS) {
  3040. ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
  3041. if (mcp->mb[0] == MBS_INVALID_COMMAND)
  3042. rval = QLA_INVALID_COMMAND;
  3043. } else {
  3044. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
  3045. "Done %s.\n", __func__);
  3046. }
  3047. return rval;
  3048. }
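/*
* qla2x00_enable_eft_trace
* Enable extended firmware tracing (EFT) into the DMA buffer at eft_dma,
* sized as 'buffers' trace buffers, with trace AENs disabled.
*/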
  3049. int
  3050. qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
  3051. uint16_t buffers)
  3052. {
  3053. int rval;
  3054. mbx_cmd_t mc;
  3055. mbx_cmd_t *mcp = &mc;
  3056. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
  3057. "Entered %s.\n", __func__);
  3058. if (!IS_FWI2_CAPABLE(vha->hw))
  3059. return QLA_FUNCTION_FAILED;
  3060. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3061. return QLA_FUNCTION_FAILED;
  3062. mcp->mb[0] = MBC_TRACE_CONTROL;
  3063. mcp->mb[1] = TC_EFT_ENABLE;
  3064. mcp->mb[2] = LSW(eft_dma);
  3065. mcp->mb[3] = MSW(eft_dma);
  3066. mcp->mb[4] = LSW(MSD(eft_dma));
  3067. mcp->mb[5] = MSW(MSD(eft_dma));
  3068. mcp->mb[6] = buffers;
  3069. mcp->mb[7] = TC_AEN_DISABLE;
  3070. mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3071. mcp->in_mb = MBX_1|MBX_0;
  3072. mcp->tov = MBX_TOV_SECONDS;
  3073. mcp->flags = 0;
  3074. rval = qla2x00_mailbox_command(vha, mcp);
  3075. if (rval != QLA_SUCCESS) {
  3076. ql_dbg(ql_dbg_mbx, vha, 0x10a5,
  3077. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3078. rval, mcp->mb[0], mcp->mb[1]);
  3079. } else {
  3080. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
  3081. "Done %s.\n", __func__);
  3082. }
  3083. return rval;
  3084. }
  3085. int
  3086. qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
  3087. {
  3088. int rval;
  3089. mbx_cmd_t mc;
  3090. mbx_cmd_t *mcp = &mc;
  3091. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
  3092. "Entered %s.\n", __func__);
  3093. if (!IS_FWI2_CAPABLE(vha->hw))
  3094. return QLA_FUNCTION_FAILED;
  3095. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3096. return QLA_FUNCTION_FAILED;
  3097. mcp->mb[0] = MBC_TRACE_CONTROL;
  3098. mcp->mb[1] = TC_EFT_DISABLE;
  3099. mcp->out_mb = MBX_1|MBX_0;
  3100. mcp->in_mb = MBX_1|MBX_0;
  3101. mcp->tov = MBX_TOV_SECONDS;
  3102. mcp->flags = 0;
  3103. rval = qla2x00_mailbox_command(vha, mcp);
  3104. if (rval != QLA_SUCCESS) {
  3105. ql_dbg(ql_dbg_mbx, vha, 0x10a8,
  3106. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3107. rval, mcp->mb[0], mcp->mb[1]);
  3108. } else {
  3109. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
  3110. "Done %s.\n", __func__);
  3111. }
  3112. return rval;
  3113. }
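/*
* qla2x00_enable_fce_trace
* Enable Fibre Channel Event (FCE) tracing into the DMA buffer at fce_dma;
* the enable-time mailbox registers are returned through 'mb' and the
* buffer count through 'dwords'.
*/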
  3114. int
  3115. qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
  3116. uint16_t buffers, uint16_t *mb, uint32_t *dwords)
  3117. {
  3118. int rval;
  3119. mbx_cmd_t mc;
  3120. mbx_cmd_t *mcp = &mc;
  3121. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
  3122. "Entered %s.\n", __func__);
  3123. if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
  3124. !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
  3125. return QLA_FUNCTION_FAILED;
  3126. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3127. return QLA_FUNCTION_FAILED;
  3128. mcp->mb[0] = MBC_TRACE_CONTROL;
  3129. mcp->mb[1] = TC_FCE_ENABLE;
  3130. mcp->mb[2] = LSW(fce_dma);
  3131. mcp->mb[3] = MSW(fce_dma);
  3132. mcp->mb[4] = LSW(MSD(fce_dma));
  3133. mcp->mb[5] = MSW(MSD(fce_dma));
  3134. mcp->mb[6] = buffers;
  3135. mcp->mb[7] = TC_AEN_DISABLE;
  3136. mcp->mb[8] = 0;
  3137. mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
  3138. mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
  3139. mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
  3140. MBX_1|MBX_0;
  3141. mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3142. mcp->tov = MBX_TOV_SECONDS;
  3143. mcp->flags = 0;
  3144. rval = qla2x00_mailbox_command(vha, mcp);
  3145. if (rval != QLA_SUCCESS) {
  3146. ql_dbg(ql_dbg_mbx, vha, 0x10ab,
  3147. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3148. rval, mcp->mb[0], mcp->mb[1]);
  3149. } else {
  3150. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
  3151. "Done %s.\n", __func__);
  3152. if (mb)
  3153. memcpy(mb, mcp->mb, 8 * sizeof(*mb));
  3154. if (dwords)
  3155. *dwords = buffers;
  3156. }
  3157. return rval;
  3158. }
  3159. int
  3160. qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
  3161. {
  3162. int rval;
  3163. mbx_cmd_t mc;
  3164. mbx_cmd_t *mcp = &mc;
  3165. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
  3166. "Entered %s.\n", __func__);
  3167. if (!IS_FWI2_CAPABLE(vha->hw))
  3168. return QLA_FUNCTION_FAILED;
  3169. if (unlikely(pci_channel_offline(vha->hw->pdev)))
  3170. return QLA_FUNCTION_FAILED;
  3171. mcp->mb[0] = MBC_TRACE_CONTROL;
  3172. mcp->mb[1] = TC_FCE_DISABLE;
  3173. mcp->mb[2] = TC_FCE_DISABLE_TRACE;
  3174. mcp->out_mb = MBX_2|MBX_1|MBX_0;
  3175. mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
  3176. MBX_1|MBX_0;
  3177. mcp->tov = MBX_TOV_SECONDS;
  3178. mcp->flags = 0;
  3179. rval = qla2x00_mailbox_command(vha, mcp);
  3180. if (rval != QLA_SUCCESS) {
  3181. ql_dbg(ql_dbg_mbx, vha, 0x10ae,
  3182. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3183. rval, mcp->mb[0], mcp->mb[1]);
  3184. } else {
  3185. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
  3186. "Done %s.\n", __func__);
  3187. if (wr)
  3188. *wr = (uint64_t) mcp->mb[5] << 48 |
  3189. (uint64_t) mcp->mb[4] << 32 |
  3190. (uint64_t) mcp->mb[3] << 16 |
  3191. (uint64_t) mcp->mb[2];
  3192. if (rd)
  3193. *rd = (uint64_t) mcp->mb[9] << 48 |
  3194. (uint64_t) mcp->mb[8] << 32 |
  3195. (uint64_t) mcp->mb[7] << 16 |
  3196. (uint64_t) mcp->mb[6];
  3197. }
  3198. return rval;
  3199. }
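/*
* qla2x00_get_idma_speed
* Query the firmware port parameters (iIDMA) for the given loop ID and
* return the currently negotiated port speed in *port_speed.
*/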
  3200. int
  3201. qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
  3202. uint16_t *port_speed, uint16_t *mb)
  3203. {
  3204. int rval;
  3205. mbx_cmd_t mc;
  3206. mbx_cmd_t *mcp = &mc;
  3207. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
  3208. "Entered %s.\n", __func__);
  3209. if (!IS_IIDMA_CAPABLE(vha->hw))
  3210. return QLA_FUNCTION_FAILED;
  3211. mcp->mb[0] = MBC_PORT_PARAMS;
  3212. mcp->mb[1] = loop_id;
  3213. mcp->mb[2] = mcp->mb[3] = 0;
  3214. mcp->mb[9] = vha->vp_idx;
  3215. mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
  3216. mcp->in_mb = MBX_3|MBX_1|MBX_0;
  3217. mcp->tov = MBX_TOV_SECONDS;
  3218. mcp->flags = 0;
  3219. rval = qla2x00_mailbox_command(vha, mcp);
  3220. /* Return mailbox statuses. */
  3221. if (mb != NULL) {
  3222. mb[0] = mcp->mb[0];
  3223. mb[1] = mcp->mb[1];
  3224. mb[3] = mcp->mb[3];
  3225. }
  3226. if (rval != QLA_SUCCESS) {
  3227. ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
  3228. } else {
  3229. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
  3230. "Done %s.\n", __func__);
  3231. if (port_speed)
  3232. *port_speed = mcp->mb[3];
  3233. }
  3234. return rval;
  3235. }
  3236. int
  3237. qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
  3238. uint16_t port_speed, uint16_t *mb)
  3239. {
  3240. int rval;
  3241. mbx_cmd_t mc;
  3242. mbx_cmd_t *mcp = &mc;
  3243. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
  3244. "Entered %s.\n", __func__);
  3245. if (!IS_IIDMA_CAPABLE(vha->hw))
  3246. return QLA_FUNCTION_FAILED;
  3247. mcp->mb[0] = MBC_PORT_PARAMS;
  3248. mcp->mb[1] = loop_id;
  3249. mcp->mb[2] = BIT_0;
  3250. mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
  3251. mcp->mb[9] = vha->vp_idx;
  3252. mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
  3253. mcp->in_mb = MBX_3|MBX_1|MBX_0;
  3254. mcp->tov = MBX_TOV_SECONDS;
  3255. mcp->flags = 0;
  3256. rval = qla2x00_mailbox_command(vha, mcp);
  3257. /* Return mailbox statuses. */
  3258. if (mb != NULL) {
  3259. mb[0] = mcp->mb[0];
  3260. mb[1] = mcp->mb[1];
  3261. mb[3] = mcp->mb[3];
  3262. }
  3263. if (rval != QLA_SUCCESS) {
  3264. ql_dbg(ql_dbg_mbx, vha, 0x10b4,
  3265. "Failed=%x.\n", rval);
  3266. } else {
  3267. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
  3268. "Done %s.\n", __func__);
  3269. }
  3270. return rval;
  3271. }
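/*
* qla24xx_report_id_acquisition
* Handle a Report ID Acquisition IOCB from the firmware: update the host/VP
* port ID mapping for loop (format 0), fabric (format 1) and N2N (format 2)
* topologies and flag any follow-up work for the DPC thread.
*/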
  3272. void
  3273. qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
  3274. struct vp_rpt_id_entry_24xx *rptid_entry)
  3275. {
  3276. struct qla_hw_data *ha = vha->hw;
  3277. scsi_qla_host_t *vp = NULL;
  3278. unsigned long flags;
  3279. int found;
  3280. port_id_t id;
  3281. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
  3282. "Entered %s.\n", __func__);
  3283. if (rptid_entry->entry_status != 0)
  3284. return;
  3285. id.b.domain = rptid_entry->port_id[2];
  3286. id.b.area = rptid_entry->port_id[1];
  3287. id.b.al_pa = rptid_entry->port_id[0];
  3288. id.b.rsvd_1 = 0;
  3289. if (rptid_entry->format == 0) {
  3290. /* loop */
  3291. ql_dbg(ql_dbg_async, vha, 0x10b7,
  3292. "Format 0 : Number of VPs setup %d, number of "
  3293. "VPs acquired %d.\n", rptid_entry->vp_setup,
  3294. rptid_entry->vp_acquired);
  3295. ql_dbg(ql_dbg_async, vha, 0x10b8,
  3296. "Primary port id %02x%02x%02x.\n",
  3297. rptid_entry->port_id[2], rptid_entry->port_id[1],
  3298. rptid_entry->port_id[0]);
  3299. qlt_update_host_map(vha, id);
  3300. } else if (rptid_entry->format == 1) {
  3301. /* fabric */
  3302. ql_dbg(ql_dbg_async, vha, 0x10b9,
  3303. "Format 1: VP[%d] enabled - status %d - with "
  3304. "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
  3305. rptid_entry->vp_status,
  3306. rptid_entry->port_id[2], rptid_entry->port_id[1],
  3307. rptid_entry->port_id[0]);
  3308. /* buffer to buffer credit flag */
  3309. vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
  3310. if (rptid_entry->vp_idx == 0) {
  3311. if (rptid_entry->vp_status == VP_STAT_COMPL) {
  3312. /* FA-WWN is only for physical port */
  3313. if (qla_ini_mode_enabled(vha) &&
  3314. ha->flags.fawwpn_enabled &&
  3315. (rptid_entry->u.f1.flags &
  3316. BIT_6)) {
  3317. memcpy(vha->port_name,
  3318. rptid_entry->u.f1.port_name,
  3319. WWN_SIZE);
  3320. }
  3321. qlt_update_host_map(vha, id);
  3322. }
  3323. set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
  3324. set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
  3325. } else {
  3326. if (rptid_entry->vp_status != VP_STAT_COMPL &&
  3327. rptid_entry->vp_status != VP_STAT_ID_CHG) {
  3328. ql_dbg(ql_dbg_mbx, vha, 0x10ba,
  3329. "Could not acquire ID for VP[%d].\n",
  3330. rptid_entry->vp_idx);
  3331. return;
  3332. }
  3333. found = 0;
  3334. spin_lock_irqsave(&ha->vport_slock, flags);
  3335. list_for_each_entry(vp, &ha->vp_list, list) {
  3336. if (rptid_entry->vp_idx == vp->vp_idx) {
  3337. found = 1;
  3338. break;
  3339. }
  3340. }
  3341. spin_unlock_irqrestore(&ha->vport_slock, flags);
  3342. if (!found)
  3343. return;
  3344. qlt_update_host_map(vp, id);
  3345. /*
  3346. * Cannot configure here as we are still sitting on the
  3347. * response queue. Handle it in dpc context.
  3348. */
  3349. set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
  3350. set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
  3351. set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
  3352. }
  3353. set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
  3354. qla2xxx_wake_dpc(vha);
  3355. } else if (rptid_entry->format == 2) {
  3356. ql_dbg(ql_dbg_async, vha, 0x505f,
  3357. "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
  3358. rptid_entry->port_id[2], rptid_entry->port_id[1],
  3359. rptid_entry->port_id[0]);
  3360. ql_dbg(ql_dbg_async, vha, 0x5075,
  3361. "N2N: Remote WWPN %8phC.\n",
  3362. rptid_entry->u.f2.port_name);
  3363. /* N2N. direct connect */
  3364. vha->d_id.b.domain = rptid_entry->port_id[2];
  3365. vha->d_id.b.area = rptid_entry->port_id[1];
  3366. vha->d_id.b.al_pa = rptid_entry->port_id[0];
  3367. spin_lock_irqsave(&ha->vport_slock, flags);
  3368. qlt_update_vp_map(vha, SET_AL_PA);
  3369. spin_unlock_irqrestore(&ha->vport_slock, flags);
  3370. }
  3371. }
  3372. /*
  3373. * qla24xx_modify_vp_config
  3374. * Change VP configuration for vha
  3375. *
  3376. * Input:
  3377. * vha = adapter block pointer.
  3378. *
  3379. * Returns:
  3380. * qla2xxx local function return status code.
  3381. *
  3382. * Context:
  3383. * Kernel context.
  3384. */
  3385. int
  3386. qla24xx_modify_vp_config(scsi_qla_host_t *vha)
  3387. {
  3388. int rval;
  3389. struct vp_config_entry_24xx *vpmod;
  3390. dma_addr_t vpmod_dma;
  3391. struct qla_hw_data *ha = vha->hw;
  3392. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  3393. /* This can be called by the parent */
  3394. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
  3395. "Entered %s.\n", __func__);
  3396. vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
  3397. if (!vpmod) {
  3398. ql_log(ql_log_warn, vha, 0x10bc,
  3399. "Failed to allocate modify VP IOCB.\n");
  3400. return QLA_MEMORY_ALLOC_FAILED;
  3401. }
  3402. memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
  3403. vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
  3404. vpmod->entry_count = 1;
  3405. vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
  3406. vpmod->vp_count = 1;
  3407. vpmod->vp_index1 = vha->vp_idx;
  3408. vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
  3409. qlt_modify_vp_config(vha, vpmod);
  3410. memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
  3411. memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
  3412. vpmod->entry_count = 1;
  3413. rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
  3414. if (rval != QLA_SUCCESS) {
  3415. ql_dbg(ql_dbg_mbx, vha, 0x10bd,
  3416. "Failed to issue VP config IOCB (%x).\n", rval);
} else if (vpmod->entry_status != 0) {
ql_dbg(ql_dbg_mbx, vha, 0x10be,
"Failed to complete IOCB -- error status (%x).\n",
vpmod->entry_status);
  3421. rval = QLA_FUNCTION_FAILED;
  3422. } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
  3423. ql_dbg(ql_dbg_mbx, vha, 0x10bf,
  3424. "Failed to complete IOCB -- completion status (%x).\n",
  3425. le16_to_cpu(vpmod->comp_status));
  3426. rval = QLA_FUNCTION_FAILED;
  3427. } else {
  3428. /* EMPTY */
  3429. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
  3430. "Done %s.\n", __func__);
  3431. fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
  3432. }
  3433. dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
  3434. return rval;
  3435. }
/*
* qla24xx_control_vp
* Enable or disable a virtual port for the given host
*
* Input:
* vha = virtual port adapter block pointer.
* cmd = VP control command (enable/disable).
*
* Returns:
* qla2xxx local function return status code.
*
* Context:
* Kernel context.
*/
  3451. int
  3452. qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
  3453. {
  3454. int rval;
  3455. int map, pos;
  3456. struct vp_ctrl_entry_24xx *vce;
  3457. dma_addr_t vce_dma;
  3458. struct qla_hw_data *ha = vha->hw;
  3459. int vp_index = vha->vp_idx;
  3460. struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
  3461. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
  3462. "Entered %s enabling index %d.\n", __func__, vp_index);
  3463. if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
  3464. return QLA_PARAMETER_ERROR;
  3465. vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
  3466. if (!vce) {
  3467. ql_log(ql_log_warn, vha, 0x10c2,
  3468. "Failed to allocate VP control IOCB.\n");
  3469. return QLA_MEMORY_ALLOC_FAILED;
  3470. }
  3471. memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
  3472. vce->entry_type = VP_CTRL_IOCB_TYPE;
  3473. vce->entry_count = 1;
  3474. vce->command = cpu_to_le16(cmd);
  3475. vce->vp_count = cpu_to_le16(1);
/* The index map in firmware starts at 1, so decrement the index;
* this is safe because index 0 is never used.
*/
  3479. map = (vp_index - 1) / 8;
  3480. pos = (vp_index - 1) & 7;
  3481. mutex_lock(&ha->vport_lock);
  3482. vce->vp_idx_map[map] |= 1 << pos;
  3483. mutex_unlock(&ha->vport_lock);
  3484. rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
  3485. if (rval != QLA_SUCCESS) {
  3486. ql_dbg(ql_dbg_mbx, vha, 0x10c3,
  3487. "Failed to issue VP control IOCB (%x).\n", rval);
  3488. } else if (vce->entry_status != 0) {
  3489. ql_dbg(ql_dbg_mbx, vha, 0x10c4,
  3490. "Failed to complete IOCB -- error status (%x).\n",
  3491. vce->entry_status);
  3492. rval = QLA_FUNCTION_FAILED;
  3493. } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
  3494. ql_dbg(ql_dbg_mbx, vha, 0x10c5,
  3495. "Failed to complete IOCB -- completion status (%x).\n",
  3496. le16_to_cpu(vce->comp_status));
  3497. rval = QLA_FUNCTION_FAILED;
  3498. } else {
  3499. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
  3500. "Done %s.\n", __func__);
  3501. }
  3502. dma_pool_free(ha->s_dma_pool, vce, vce_dma);
  3503. return rval;
  3504. }
  3505. /*
  3506. * qla2x00_send_change_request
* Enable or disable receipt of RSCN requests from the fabric controller
  3508. *
  3509. * Input:
  3510. * ha = adapter block pointer
  3511. * format = registration format:
  3512. * 0 - Reserved
  3513. * 1 - Fabric detected registration
  3514. * 2 - N_port detected registration
  3515. * 3 - Full registration
  3516. * FF - clear registration
  3517. * vp_idx = Virtual port index
  3518. *
  3519. * Returns:
  3520. * qla2x00 local function return status code.
  3521. *
  3522. * Context:
  3523. * Kernel Context
  3524. */
  3525. int
  3526. qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
  3527. uint16_t vp_idx)
  3528. {
  3529. int rval;
  3530. mbx_cmd_t mc;
  3531. mbx_cmd_t *mcp = &mc;
  3532. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
  3533. "Entered %s.\n", __func__);
  3534. mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
  3535. mcp->mb[1] = format;
  3536. mcp->mb[9] = vp_idx;
  3537. mcp->out_mb = MBX_9|MBX_1|MBX_0;
  3538. mcp->in_mb = MBX_0|MBX_1;
  3539. mcp->tov = MBX_TOV_SECONDS;
  3540. mcp->flags = 0;
  3541. rval = qla2x00_mailbox_command(vha, mcp);
  3542. if (rval == QLA_SUCCESS) {
  3543. if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
  3544. rval = BIT_1;
  3545. }
  3546. } else
  3547. rval = BIT_1;
  3548. return rval;
  3549. }
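/*
* qla2x00_dump_ram
* Dump a region of RISC RAM starting at 'addr' into the DMA buffer at
* req_dma, using the extended form of the command when required by the
* address or adapter type.
*/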
  3550. int
  3551. qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
  3552. uint32_t size)
  3553. {
  3554. int rval;
  3555. mbx_cmd_t mc;
  3556. mbx_cmd_t *mcp = &mc;
  3557. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
  3558. "Entered %s.\n", __func__);
  3559. if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
  3560. mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
  3561. mcp->mb[8] = MSW(addr);
  3562. mcp->out_mb = MBX_8|MBX_0;
  3563. } else {
  3564. mcp->mb[0] = MBC_DUMP_RISC_RAM;
  3565. mcp->out_mb = MBX_0;
  3566. }
  3567. mcp->mb[1] = LSW(addr);
  3568. mcp->mb[2] = MSW(req_dma);
  3569. mcp->mb[3] = LSW(req_dma);
  3570. mcp->mb[6] = MSW(MSD(req_dma));
  3571. mcp->mb[7] = LSW(MSD(req_dma));
  3572. mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
  3573. if (IS_FWI2_CAPABLE(vha->hw)) {
  3574. mcp->mb[4] = MSW(size);
  3575. mcp->mb[5] = LSW(size);
  3576. mcp->out_mb |= MBX_5|MBX_4;
  3577. } else {
  3578. mcp->mb[4] = LSW(size);
  3579. mcp->out_mb |= MBX_4;
  3580. }
  3581. mcp->in_mb = MBX_0;
  3582. mcp->tov = MBX_TOV_SECONDS;
  3583. mcp->flags = 0;
  3584. rval = qla2x00_mailbox_command(vha, mcp);
  3585. if (rval != QLA_SUCCESS) {
  3586. ql_dbg(ql_dbg_mbx, vha, 0x1008,
  3587. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3588. } else {
  3589. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
  3590. "Done %s.\n", __func__);
  3591. }
  3592. return rval;
  3593. }
  3594. /* 84XX Support **************************************************************/
  3595. struct cs84xx_mgmt_cmd {
  3596. union {
  3597. struct verify_chip_entry_84xx req;
  3598. struct verify_chip_rsp_84xx rsp;
  3599. } p;
  3600. };
  3601. int
  3602. qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
  3603. {
  3604. int rval, retry;
  3605. struct cs84xx_mgmt_cmd *mn;
  3606. dma_addr_t mn_dma;
  3607. uint16_t options;
  3608. unsigned long flags;
  3609. struct qla_hw_data *ha = vha->hw;
  3610. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
  3611. "Entered %s.\n", __func__);
  3612. mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
  3613. if (mn == NULL) {
  3614. return QLA_MEMORY_ALLOC_FAILED;
  3615. }
  3616. /* Force Update? */
  3617. options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
  3618. /* Diagnostic firmware? */
  3619. /* options |= MENLO_DIAG_FW; */
  3620. /* We update the firmware with only one data sequence. */
  3621. options |= VCO_END_OF_DATA;
  3622. do {
  3623. retry = 0;
  3624. memset(mn, 0, sizeof(*mn));
  3625. mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
  3626. mn->p.req.entry_count = 1;
  3627. mn->p.req.options = cpu_to_le16(options);
  3628. ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
  3629. "Dump of Verify Request.\n");
  3630. ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
  3631. (uint8_t *)mn, sizeof(*mn));
  3632. rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
  3633. if (rval != QLA_SUCCESS) {
  3634. ql_dbg(ql_dbg_mbx, vha, 0x10cb,
  3635. "Failed to issue verify IOCB (%x).\n", rval);
  3636. goto verify_done;
  3637. }
  3638. ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
  3639. "Dump of Verify Response.\n");
  3640. ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
  3641. (uint8_t *)mn, sizeof(*mn));
  3642. status[0] = le16_to_cpu(mn->p.rsp.comp_status);
  3643. status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
  3644. le16_to_cpu(mn->p.rsp.failure_code) : 0;
  3645. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
  3646. "cs=%x fc=%x.\n", status[0], status[1]);
  3647. if (status[0] != CS_COMPLETE) {
  3648. rval = QLA_FUNCTION_FAILED;
  3649. if (!(options & VCO_DONT_UPDATE_FW)) {
  3650. ql_dbg(ql_dbg_mbx, vha, 0x10cf,
  3651. "Firmware update failed. Retrying "
  3652. "without update firmware.\n");
  3653. options |= VCO_DONT_UPDATE_FW;
  3654. options &= ~VCO_FORCE_UPDATE;
  3655. retry = 1;
  3656. }
  3657. } else {
  3658. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
  3659. "Firmware updated to %x.\n",
  3660. le32_to_cpu(mn->p.rsp.fw_ver));
  3661. /* NOTE: we only update OP firmware. */
  3662. spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
  3663. ha->cs84xx->op_fw_version =
  3664. le32_to_cpu(mn->p.rsp.fw_ver);
  3665. spin_unlock_irqrestore(&ha->cs84xx->access_lock,
  3666. flags);
  3667. }
  3668. } while (retry);
  3669. verify_done:
  3670. dma_pool_free(ha->s_dma_pool, mn, mn_dma);
  3671. if (rval != QLA_SUCCESS) {
  3672. ql_dbg(ql_dbg_mbx, vha, 0x10d1,
  3673. "Failed=%x.\n", rval);
  3674. } else {
  3675. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
  3676. "Done %s.\n", __func__);
  3677. }
  3678. return rval;
  3679. }
  3680. int
  3681. qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
  3682. {
  3683. int rval;
  3684. unsigned long flags;
  3685. mbx_cmd_t mc;
  3686. mbx_cmd_t *mcp = &mc;
  3687. struct qla_hw_data *ha = vha->hw;
  3688. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
  3689. "Entered %s.\n", __func__);
  3690. if (IS_SHADOW_REG_CAPABLE(ha))
  3691. req->options |= BIT_13;
  3692. mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
  3693. mcp->mb[1] = req->options;
  3694. mcp->mb[2] = MSW(LSD(req->dma));
  3695. mcp->mb[3] = LSW(LSD(req->dma));
  3696. mcp->mb[6] = MSW(MSD(req->dma));
  3697. mcp->mb[7] = LSW(MSD(req->dma));
  3698. mcp->mb[5] = req->length;
  3699. if (req->rsp)
  3700. mcp->mb[10] = req->rsp->id;
  3701. mcp->mb[12] = req->qos;
  3702. mcp->mb[11] = req->vp_idx;
  3703. mcp->mb[13] = req->rid;
  3704. if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
  3705. mcp->mb[15] = 0;
  3706. mcp->mb[4] = req->id;
  3707. /* que in ptr index */
  3708. mcp->mb[8] = 0;
  3709. /* que out ptr index */
  3710. mcp->mb[9] = *req->out_ptr = 0;
  3711. mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
  3712. MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3713. mcp->in_mb = MBX_0;
  3714. mcp->flags = MBX_DMA_OUT;
  3715. mcp->tov = MBX_TOV_SECONDS * 2;
  3716. if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
  3717. mcp->in_mb |= MBX_1;
  3718. if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
  3719. mcp->out_mb |= MBX_15;
  3720. /* debug q create issue in SR-IOV */
  3721. mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
  3722. }
  3723. spin_lock_irqsave(&ha->hardware_lock, flags);
  3724. if (!(req->options & BIT_0)) {
  3725. WRT_REG_DWORD(req->req_q_in, 0);
  3726. if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
  3727. WRT_REG_DWORD(req->req_q_out, 0);
  3728. }
  3729. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  3730. rval = qla2x00_mailbox_command(vha, mcp);
  3731. if (rval != QLA_SUCCESS) {
  3732. ql_dbg(ql_dbg_mbx, vha, 0x10d4,
  3733. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3734. } else {
  3735. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
  3736. "Done %s.\n", __func__);
  3737. }
  3738. return rval;
  3739. }
  3740. int
  3741. qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
  3742. {
  3743. int rval;
  3744. unsigned long flags;
  3745. mbx_cmd_t mc;
  3746. mbx_cmd_t *mcp = &mc;
  3747. struct qla_hw_data *ha = vha->hw;
  3748. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
  3749. "Entered %s.\n", __func__);
  3750. if (IS_SHADOW_REG_CAPABLE(ha))
  3751. rsp->options |= BIT_13;
  3752. mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
  3753. mcp->mb[1] = rsp->options;
  3754. mcp->mb[2] = MSW(LSD(rsp->dma));
  3755. mcp->mb[3] = LSW(LSD(rsp->dma));
  3756. mcp->mb[6] = MSW(MSD(rsp->dma));
  3757. mcp->mb[7] = LSW(MSD(rsp->dma));
  3758. mcp->mb[5] = rsp->length;
  3759. mcp->mb[14] = rsp->msix->entry;
  3760. mcp->mb[13] = rsp->rid;
  3761. if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
  3762. mcp->mb[15] = 0;
  3763. mcp->mb[4] = rsp->id;
  3764. /* que in ptr index */
  3765. mcp->mb[8] = *rsp->in_ptr = 0;
  3766. /* que out ptr index */
  3767. mcp->mb[9] = 0;
  3768. mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
  3769. |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3770. mcp->in_mb = MBX_0;
  3771. mcp->flags = MBX_DMA_OUT;
  3772. mcp->tov = MBX_TOV_SECONDS * 2;
  3773. if (IS_QLA81XX(ha)) {
  3774. mcp->out_mb |= MBX_12|MBX_11|MBX_10;
  3775. mcp->in_mb |= MBX_1;
  3776. } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
  3777. mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
  3778. mcp->in_mb |= MBX_1;
  3779. /* debug q create issue in SR-IOV */
  3780. mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
  3781. }
  3782. spin_lock_irqsave(&ha->hardware_lock, flags);
  3783. if (!(rsp->options & BIT_0)) {
  3784. WRT_REG_DWORD(rsp->rsp_q_out, 0);
  3785. if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
  3786. WRT_REG_DWORD(rsp->rsp_q_in, 0);
  3787. }
  3788. spin_unlock_irqrestore(&ha->hardware_lock, flags);
  3789. rval = qla2x00_mailbox_command(vha, mcp);
  3790. if (rval != QLA_SUCCESS) {
  3791. ql_dbg(ql_dbg_mbx, vha, 0x10d7,
  3792. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3793. } else {
  3794. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
  3795. "Done %s.\n", __func__);
  3796. }
  3797. return rval;
  3798. }
  3799. int
  3800. qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
  3801. {
  3802. int rval;
  3803. mbx_cmd_t mc;
  3804. mbx_cmd_t *mcp = &mc;
  3805. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
  3806. "Entered %s.\n", __func__);
  3807. mcp->mb[0] = MBC_IDC_ACK;
  3808. memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
  3809. mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3810. mcp->in_mb = MBX_0;
  3811. mcp->tov = MBX_TOV_SECONDS;
  3812. mcp->flags = 0;
  3813. rval = qla2x00_mailbox_command(vha, mcp);
  3814. if (rval != QLA_SUCCESS) {
  3815. ql_dbg(ql_dbg_mbx, vha, 0x10da,
  3816. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  3817. } else {
  3818. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
  3819. "Done %s.\n", __func__);
  3820. }
  3821. return rval;
  3822. }
  3823. int
  3824. qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
  3825. {
  3826. int rval;
  3827. mbx_cmd_t mc;
  3828. mbx_cmd_t *mcp = &mc;
  3829. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
  3830. "Entered %s.\n", __func__);
  3831. if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
  3832. !IS_QLA27XX(vha->hw))
  3833. return QLA_FUNCTION_FAILED;
  3834. mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
  3835. mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
  3836. mcp->out_mb = MBX_1|MBX_0;
  3837. mcp->in_mb = MBX_1|MBX_0;
  3838. mcp->tov = MBX_TOV_SECONDS;
  3839. mcp->flags = 0;
  3840. rval = qla2x00_mailbox_command(vha, mcp);
  3841. if (rval != QLA_SUCCESS) {
  3842. ql_dbg(ql_dbg_mbx, vha, 0x10dd,
  3843. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3844. rval, mcp->mb[0], mcp->mb[1]);
  3845. } else {
  3846. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
  3847. "Done %s.\n", __func__);
  3848. *sector_size = mcp->mb[1];
  3849. }
  3850. return rval;
  3851. }
  3852. int
  3853. qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
  3854. {
  3855. int rval;
  3856. mbx_cmd_t mc;
  3857. mbx_cmd_t *mcp = &mc;
  3858. if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
  3859. !IS_QLA27XX(vha->hw))
  3860. return QLA_FUNCTION_FAILED;
  3861. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
  3862. "Entered %s.\n", __func__);
  3863. mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
  3864. mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
  3865. FAC_OPT_CMD_WRITE_PROTECT;
  3866. mcp->out_mb = MBX_1|MBX_0;
  3867. mcp->in_mb = MBX_1|MBX_0;
  3868. mcp->tov = MBX_TOV_SECONDS;
  3869. mcp->flags = 0;
  3870. rval = qla2x00_mailbox_command(vha, mcp);
  3871. if (rval != QLA_SUCCESS) {
  3872. ql_dbg(ql_dbg_mbx, vha, 0x10e0,
  3873. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3874. rval, mcp->mb[0], mcp->mb[1]);
  3875. } else {
  3876. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
  3877. "Done %s.\n", __func__);
  3878. }
  3879. return rval;
  3880. }
  3881. int
  3882. qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
  3883. {
  3884. int rval;
  3885. mbx_cmd_t mc;
  3886. mbx_cmd_t *mcp = &mc;
  3887. if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
  3888. !IS_QLA27XX(vha->hw))
  3889. return QLA_FUNCTION_FAILED;
  3890. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
  3891. "Entered %s.\n", __func__);
  3892. mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
  3893. mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
  3894. mcp->mb[2] = LSW(start);
  3895. mcp->mb[3] = MSW(start);
  3896. mcp->mb[4] = LSW(finish);
  3897. mcp->mb[5] = MSW(finish);
  3898. mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
  3899. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  3900. mcp->tov = MBX_TOV_SECONDS;
  3901. mcp->flags = 0;
  3902. rval = qla2x00_mailbox_command(vha, mcp);
  3903. if (rval != QLA_SUCCESS) {
  3904. ql_dbg(ql_dbg_mbx, vha, 0x10e3,
  3905. "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
  3906. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
  3907. } else {
  3908. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
  3909. "Done %s.\n", __func__);
  3910. }
  3911. return rval;
  3912. }
  3913. int
  3914. qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
  3915. {
  3916. int rval = 0;
  3917. mbx_cmd_t mc;
  3918. mbx_cmd_t *mcp = &mc;
  3919. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
  3920. "Entered %s.\n", __func__);
  3921. mcp->mb[0] = MBC_RESTART_MPI_FW;
  3922. mcp->out_mb = MBX_0;
  3923. mcp->in_mb = MBX_0|MBX_1;
  3924. mcp->tov = MBX_TOV_SECONDS;
  3925. mcp->flags = 0;
  3926. rval = qla2x00_mailbox_command(vha, mcp);
  3927. if (rval != QLA_SUCCESS) {
  3928. ql_dbg(ql_dbg_mbx, vha, 0x10e6,
  3929. "Failed=%x mb[0]=%x mb[1]=%x.\n",
  3930. rval, mcp->mb[0], mcp->mb[1]);
  3931. } else {
  3932. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
  3933. "Done %s.\n", __func__);
  3934. }
  3935. return rval;
  3936. }
  3937. int
  3938. qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
  3939. {
  3940. int rval;
  3941. mbx_cmd_t mc;
  3942. mbx_cmd_t *mcp = &mc;
  3943. int i;
  3944. int len;
  3945. uint16_t *str;
  3946. struct qla_hw_data *ha = vha->hw;
  3947. if (!IS_P3P_TYPE(ha))
  3948. return QLA_FUNCTION_FAILED;
  3949. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
  3950. "Entered %s.\n", __func__);
  3951. str = (void *)version;
  3952. len = strlen(version);
  3953. mcp->mb[0] = MBC_SET_RNID_PARAMS;
  3954. mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
  3955. mcp->out_mb = MBX_1|MBX_0;
  3956. for (i = 4; i < 16 && len; i++, str++, len -= 2) {
  3957. mcp->mb[i] = cpu_to_le16p(str);
  3958. mcp->out_mb |= 1<<i;
  3959. }
  3960. for (; i < 16; i++) {
  3961. mcp->mb[i] = 0;
  3962. mcp->out_mb |= 1<<i;
  3963. }
  3964. mcp->in_mb = MBX_1|MBX_0;
  3965. mcp->tov = MBX_TOV_SECONDS;
  3966. mcp->flags = 0;
  3967. rval = qla2x00_mailbox_command(vha, mcp);
  3968. if (rval != QLA_SUCCESS) {
  3969. ql_dbg(ql_dbg_mbx, vha, 0x117c,
  3970. "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
  3971. } else {
  3972. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
  3973. "Done %s.\n", __func__);
  3974. }
  3975. return rval;
  3976. }
  3977. int
  3978. qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
  3979. {
  3980. int rval;
  3981. mbx_cmd_t mc;
  3982. mbx_cmd_t *mcp = &mc;
  3983. int len;
  3984. uint16_t dwlen;
  3985. uint8_t *str;
  3986. dma_addr_t str_dma;
  3987. struct qla_hw_data *ha = vha->hw;
  3988. if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
  3989. IS_P3P_TYPE(ha))
  3990. return QLA_FUNCTION_FAILED;
  3991. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
  3992. "Entered %s.\n", __func__);
  3993. str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
  3994. if (!str) {
  3995. ql_log(ql_log_warn, vha, 0x117f,
  3996. "Failed to allocate driver version param.\n");
  3997. return QLA_MEMORY_ALLOC_FAILED;
  3998. }
  3999. memcpy(str, "\x7\x3\x11\x0", 4);
  4000. dwlen = str[0];
  4001. len = dwlen * 4 - 4;
  4002. memset(str + 4, 0, len);
  4003. if (len > strlen(version))
  4004. len = strlen(version);
  4005. memcpy(str + 4, version, len);
  4006. mcp->mb[0] = MBC_SET_RNID_PARAMS;
  4007. mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
  4008. mcp->mb[2] = MSW(LSD(str_dma));
  4009. mcp->mb[3] = LSW(LSD(str_dma));
  4010. mcp->mb[6] = MSW(MSD(str_dma));
  4011. mcp->mb[7] = LSW(MSD(str_dma));
  4012. mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  4013. mcp->in_mb = MBX_1|MBX_0;
  4014. mcp->tov = MBX_TOV_SECONDS;
  4015. mcp->flags = 0;
  4016. rval = qla2x00_mailbox_command(vha, mcp);
  4017. if (rval != QLA_SUCCESS) {
  4018. ql_dbg(ql_dbg_mbx, vha, 0x1180,
  4019. "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
  4020. } else {
  4021. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
  4022. "Done %s.\n", __func__);
  4023. }
  4024. dma_pool_free(ha->s_dma_pool, str, str_dma);
  4025. return rval;
  4026. }
  4027. static int
  4028. qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
  4029. {
  4030. int rval;
  4031. mbx_cmd_t mc;
  4032. mbx_cmd_t *mcp = &mc;
  4033. if (!IS_FWI2_CAPABLE(vha->hw))
  4034. return QLA_FUNCTION_FAILED;
  4035. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
  4036. "Entered %s.\n", __func__);
  4037. mcp->mb[0] = MBC_GET_RNID_PARAMS;
  4038. mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
  4039. mcp->out_mb = MBX_1|MBX_0;
  4040. mcp->in_mb = MBX_1|MBX_0;
  4041. mcp->tov = MBX_TOV_SECONDS;
  4042. mcp->flags = 0;
  4043. rval = qla2x00_mailbox_command(vha, mcp);
  4044. *temp = mcp->mb[1];
  4045. if (rval != QLA_SUCCESS) {
  4046. ql_dbg(ql_dbg_mbx, vha, 0x115a,
  4047. "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
  4048. } else {
  4049. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
  4050. "Done %s.\n", __func__);
  4051. }
  4052. return rval;
  4053. }
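/*
* qla2x00_read_sfp
* Read 'len' bytes from the transceiver at 2-wire device address 'dev',
* offset 'off', into the buffer mapped at sfp_dma. When BIT_0 of 'opt' is
* set (or len == 1), the single byte read is returned through *sfp instead.
*
* Illustrative call with hypothetical buffer fields and values:
*	qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, 0xa0, 0x60, 10, 0);
*/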
  4054. int
  4055. qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
  4056. uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
  4057. {
  4058. int rval;
  4059. mbx_cmd_t mc;
  4060. mbx_cmd_t *mcp = &mc;
  4061. struct qla_hw_data *ha = vha->hw;
  4062. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
  4063. "Entered %s.\n", __func__);
  4064. if (!IS_FWI2_CAPABLE(ha))
  4065. return QLA_FUNCTION_FAILED;
  4066. if (len == 1)
  4067. opt |= BIT_0;
  4068. mcp->mb[0] = MBC_READ_SFP;
  4069. mcp->mb[1] = dev;
  4070. mcp->mb[2] = MSW(sfp_dma);
  4071. mcp->mb[3] = LSW(sfp_dma);
  4072. mcp->mb[6] = MSW(MSD(sfp_dma));
  4073. mcp->mb[7] = LSW(MSD(sfp_dma));
  4074. mcp->mb[8] = len;
  4075. mcp->mb[9] = off;
  4076. mcp->mb[10] = opt;
  4077. mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  4078. mcp->in_mb = MBX_1|MBX_0;
  4079. mcp->tov = MBX_TOV_SECONDS;
  4080. mcp->flags = 0;
  4081. rval = qla2x00_mailbox_command(vha, mcp);
  4082. if (opt & BIT_0)
  4083. *sfp = mcp->mb[1];
  4084. if (rval != QLA_SUCCESS) {
  4085. ql_dbg(ql_dbg_mbx, vha, 0x10e9,
  4086. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  4087. if (mcp->mb[0] == MBS_COMMAND_ERROR &&
  4088. mcp->mb[1] == 0x22)
  4089. /* sfp is not there */
  4090. rval = QLA_INTERFACE_ERROR;
  4091. } else {
  4092. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
  4093. "Done %s.\n", __func__);
  4094. }
  4095. return rval;
  4096. }
  4097. int
  4098. qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
  4099. uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
  4100. {
  4101. int rval;
  4102. mbx_cmd_t mc;
  4103. mbx_cmd_t *mcp = &mc;
  4104. struct qla_hw_data *ha = vha->hw;
  4105. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
  4106. "Entered %s.\n", __func__);
  4107. if (!IS_FWI2_CAPABLE(ha))
  4108. return QLA_FUNCTION_FAILED;
  4109. if (len == 1)
  4110. opt |= BIT_0;
  4111. if (opt & BIT_0)
  4112. len = *sfp;
  4113. mcp->mb[0] = MBC_WRITE_SFP;
  4114. mcp->mb[1] = dev;
  4115. mcp->mb[2] = MSW(sfp_dma);
  4116. mcp->mb[3] = LSW(sfp_dma);
  4117. mcp->mb[6] = MSW(MSD(sfp_dma));
  4118. mcp->mb[7] = LSW(MSD(sfp_dma));
  4119. mcp->mb[8] = len;
  4120. mcp->mb[9] = off;
  4121. mcp->mb[10] = opt;
  4122. mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  4123. mcp->in_mb = MBX_1|MBX_0;
  4124. mcp->tov = MBX_TOV_SECONDS;
  4125. mcp->flags = 0;
  4126. rval = qla2x00_mailbox_command(vha, mcp);
  4127. if (rval != QLA_SUCCESS) {
  4128. ql_dbg(ql_dbg_mbx, vha, 0x10ec,
  4129. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  4130. } else {
  4131. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
  4132. "Done %s.\n", __func__);
  4133. }
  4134. return rval;
  4135. }
  4136. int
  4137. qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
  4138. uint16_t size_in_bytes, uint16_t *actual_size)
  4139. {
  4140. int rval;
  4141. mbx_cmd_t mc;
  4142. mbx_cmd_t *mcp = &mc;
  4143. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
  4144. "Entered %s.\n", __func__);
  4145. if (!IS_CNA_CAPABLE(vha->hw))
  4146. return QLA_FUNCTION_FAILED;
  4147. mcp->mb[0] = MBC_GET_XGMAC_STATS;
  4148. mcp->mb[2] = MSW(stats_dma);
  4149. mcp->mb[3] = LSW(stats_dma);
  4150. mcp->mb[6] = MSW(MSD(stats_dma));
  4151. mcp->mb[7] = LSW(MSD(stats_dma));
  4152. mcp->mb[8] = size_in_bytes >> 2;
  4153. mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
  4154. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  4155. mcp->tov = MBX_TOV_SECONDS;
  4156. mcp->flags = 0;
  4157. rval = qla2x00_mailbox_command(vha, mcp);
  4158. if (rval != QLA_SUCCESS) {
  4159. ql_dbg(ql_dbg_mbx, vha, 0x10ef,
  4160. "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
  4161. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
  4162. } else {
  4163. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
  4164. "Done %s.\n", __func__);
  4165. *actual_size = mcp->mb[2] << 2;
  4166. }
  4167. return rval;
  4168. }
  4169. int
  4170. qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
  4171. uint16_t size)
  4172. {
  4173. int rval;
  4174. mbx_cmd_t mc;
  4175. mbx_cmd_t *mcp = &mc;
  4176. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
  4177. "Entered %s.\n", __func__);
  4178. if (!IS_CNA_CAPABLE(vha->hw))
  4179. return QLA_FUNCTION_FAILED;
  4180. mcp->mb[0] = MBC_GET_DCBX_PARAMS;
  4181. mcp->mb[1] = 0;
  4182. mcp->mb[2] = MSW(tlv_dma);
  4183. mcp->mb[3] = LSW(tlv_dma);
  4184. mcp->mb[6] = MSW(MSD(tlv_dma));
  4185. mcp->mb[7] = LSW(MSD(tlv_dma));
  4186. mcp->mb[8] = size;
  4187. mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
  4188. mcp->in_mb = MBX_2|MBX_1|MBX_0;
  4189. mcp->tov = MBX_TOV_SECONDS;
  4190. mcp->flags = 0;
  4191. rval = qla2x00_mailbox_command(vha, mcp);
  4192. if (rval != QLA_SUCCESS) {
  4193. ql_dbg(ql_dbg_mbx, vha, 0x10f2,
  4194. "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
  4195. rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
  4196. } else {
  4197. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
  4198. "Done %s.\n", __func__);
  4199. }
  4200. return rval;
  4201. }
  4202. int
  4203. qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
  4204. {
  4205. int rval;
  4206. mbx_cmd_t mc;
  4207. mbx_cmd_t *mcp = &mc;
  4208. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
  4209. "Entered %s.\n", __func__);
  4210. if (!IS_FWI2_CAPABLE(vha->hw))
  4211. return QLA_FUNCTION_FAILED;
  4212. mcp->mb[0] = MBC_READ_RAM_EXTENDED;
  4213. mcp->mb[1] = LSW(risc_addr);
  4214. mcp->mb[8] = MSW(risc_addr);
  4215. mcp->out_mb = MBX_8|MBX_1|MBX_0;
  4216. mcp->in_mb = MBX_3|MBX_2|MBX_0;
  4217. mcp->tov = 30;
  4218. mcp->flags = 0;
  4219. rval = qla2x00_mailbox_command(vha, mcp);
  4220. if (rval != QLA_SUCCESS) {
  4221. ql_dbg(ql_dbg_mbx, vha, 0x10f5,
  4222. "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
  4223. } else {
  4224. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
  4225. "Done %s.\n", __func__);
  4226. *data = mcp->mb[3] << 16 | mcp->mb[2];
  4227. }
  4228. return rval;
  4229. }
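/*
* qla2x00_loopback_test
* Run the diagnostic loopback mailbox command using the caller's pre-mapped
* send/receive buffers and copy the resulting mailbox registers to 'mresp'.
*/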
  4230. int
  4231. qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
  4232. uint16_t *mresp)
  4233. {
  4234. int rval;
  4235. mbx_cmd_t mc;
  4236. mbx_cmd_t *mcp = &mc;
  4237. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
  4238. "Entered %s.\n", __func__);
memset(mcp->mb, 0, sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64-bit addressing */
  4242. /* transfer count */
  4243. mcp->mb[10] = LSW(mreq->transfer_size);
  4244. mcp->mb[11] = MSW(mreq->transfer_size);
  4245. /* send data address */
  4246. mcp->mb[14] = LSW(mreq->send_dma);
  4247. mcp->mb[15] = MSW(mreq->send_dma);
  4248. mcp->mb[20] = LSW(MSD(mreq->send_dma));
  4249. mcp->mb[21] = MSW(MSD(mreq->send_dma));
  4250. /* receive data address */
  4251. mcp->mb[16] = LSW(mreq->rcv_dma);
  4252. mcp->mb[17] = MSW(mreq->rcv_dma);
  4253. mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
  4254. mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
  4255. /* Iteration count */
  4256. mcp->mb[18] = LSW(mreq->iteration_count);
  4257. mcp->mb[19] = MSW(mreq->iteration_count);
  4258. mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
  4259. MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
  4260. if (IS_CNA_CAPABLE(vha->hw))
  4261. mcp->out_mb |= MBX_2;
  4262. mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
  4263. mcp->buf_size = mreq->transfer_size;
  4264. mcp->tov = MBX_TOV_SECONDS;
  4265. mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
  4266. rval = qla2x00_mailbox_command(vha, mcp);
  4267. if (rval != QLA_SUCCESS) {
  4268. ql_dbg(ql_dbg_mbx, vha, 0x10f8,
  4269. "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
  4270. "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
  4271. mcp->mb[3], mcp->mb[18], mcp->mb[19]);
  4272. } else {
  4273. ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
  4274. "Done %s.\n", __func__);
  4275. }
  4276. /* Copy mailbox information */
  4277. memcpy( mresp, mcp->mb, 64);
  4278. return rval;
  4279. }
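/*
 * qla2x00_echo_test
 *	Run the MBC_DIAGNOSTIC_ECHO mailbox command: an ECHO frame built
 *	from mreq->send_dma is sent and the response is placed in
 *	mreq->rcv_dma. On CNA-capable adapters the FCoE FCF index is
 *	passed in mb[2]. The mailbox completion registers are copied back
 *	into mresp.
 */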
int
qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
    uint16_t *mresp)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
	/* BIT_6 specifies 64bit address */
	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
	if (IS_CNA_CAPABLE(ha)) {
		mcp->mb[2] = vha->fcoe_fcf_idx;
	}
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	mcp->mb[10] = LSW(mreq->transfer_size);

	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(ha))
		mcp->out_mb |= MBX_2;

	mcp->in_mb = MBX_0;
	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_1;
	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_3;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->buf_size = mreq->transfer_size;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
		    "Done %s.\n", __func__);
	}

	/* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);

	return rval;
}

int
qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);

	mcp->mb[0] = MBC_ISP84XX_RESET;
	mcp->mb[1] = enable_diagnostic;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
		    "Done %s.\n", __func__);

	return rval;
}

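/*
 * qla2x00_write_ram_word
 *	Write one 32-bit word of RISC RAM at risc_addr using the
 *	MBC_WRITE_RAM_WORD_EXTENDED mailbox command.
 *	FWI-2 capable adapters only.
 */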
int
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = LSW(data);
	mcp->mb[3] = MSW(data);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1101,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
		    "Done %s.\n", __func__);
	}

	return rval;
}

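/*
 * qla81xx_write_mpi_register
 *	Issue MBC_WRITE_MPI_REGISTER by writing the mailbox registers
 *	directly and polling host_status for the mailbox completion
 *	interrupt, rather than going through qla2x00_mailbox_command().
 *	mb[] supplies the four command-specific mailbox values.
 */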
int
qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	uint32_t stat, timer;
	uint16_t mb0 = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	rval = QLA_SUCCESS;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
	    "Entered %s.\n", __func__);

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Write the MBC data to the registers */
	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
	WRT_REG_WORD(&reg->mailbox1, mb[0]);
	WRT_REG_WORD(&reg->mailbox2, mb[1]);
	WRT_REG_WORD(&reg->mailbox3, mb[2]);
	WRT_REG_WORD(&reg->mailbox4, mb[3]);

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

	/* Poll for MBC interrupt */
	for (timer = 6000000; timer; timer--) {
		/* Check for pending interrupts. */
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_INT) {
			stat &= 0xff;

			if (stat == 0x1 || stat == 0x2 ||
			    stat == 0x10 || stat == 0x11) {
				set_bit(MBX_INTERRUPT,
				    &ha->mbx_cmd_flags);
				mb0 = RD_REG_WORD(&reg->mailbox0);
				WRT_REG_DWORD(&reg->hccr,
				    HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
				break;
			}
		}
		udelay(5);
	}

	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
		rval = mb0 & MBS_MASK;
	else
		rval = QLA_FUNCTION_FAILED;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1104,
		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
		    "Done %s.\n", __func__);
	}

	return rval;
}

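/*
 * qla2x00_get_data_rate
 *	Issue MBC_DATA_RATE to query the current link data rate; the
 *	result from mb[1] is cached in ha->link_data_rate unless mb[1]
 *	reports 0x7. FWI-2 capable adapters only.
 */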
int
qla2x00_get_data_rate(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_DATA_RATE;
	mcp->mb[1] = 0;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
		mcp->in_mb |= MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1107,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
		    "Done %s.\n", __func__);
		if (mcp->mb[1] != 0x7)
			ha->link_data_rate = mcp->mb[1];
	}

	return rval;
}

int
qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
	    "Entered %s.\n", __func__);

	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
	    !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_PORT_CONFIG;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x110a,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		/* Copy all bits to preserve original value */
		memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_PORT_CONFIG;
	/* Copy all bits to preserve original setting */
	memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x110d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
		    "Done %s.\n", __func__);

	return rval;
}

int
qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
    uint16_t *mb)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
	    "Entered %s.\n", __func__);

	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_PORT_PARAMS;
	mcp->mb[1] = loop_id;
	if (ha->flags.fcp_prio_enabled)
		mcp->mb[2] = BIT_1;
	else
		mcp->mb[2] = BIT_2;
	mcp->mb[4] = priority & 0xf;
	mcp->mb[9] = vha->vp_idx;
	mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (mb != NULL) {
		mb[0] = mcp->mb[0];
		mb[1] = mcp->mb[1];
		mb[3] = mcp->mb[3];
		mb[4] = mcp->mb[4];
	}

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
		    "Done %s.\n", __func__);
	}

	return rval;
}

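/*
 * qla2x00_get_thermal_temp
 *	Read the adapter temperature into *temp. Depending on the part this
 *	uses an SFP register read (specific ISP25xx subsystem IDs), the
 *	ISP82xx/ISP8044 temperature helpers, or the read-ASIC-temperature
 *	mailbox path; unsupported adapters return QLA_FUNCTION_FAILED.
 */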
int
qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
{
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha = vha->hw;
	uint8_t byte;

	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x1150,
		    "Thermal not supported by this card.\n");
		return rval;
	}

	if (IS_QLA25XX(ha)) {
		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
		    ha->pdev->subsystem_device == 0x0175) {
			rval = qla2x00_read_sfp(vha, 0, &byte,
			    0x98, 0x1, 1, BIT_13|BIT_0);
			*temp = byte;
			return rval;
		}

		if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
		    ha->pdev->subsystem_device == 0x338e) {
			rval = qla2x00_read_sfp(vha, 0, &byte,
			    0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
			*temp = byte;
			return rval;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x10c9,
		    "Thermal not supported by this card.\n");
		return rval;
	}

	if (IS_QLA82XX(ha)) {
		*temp = qla82xx_read_temperature(vha);
		rval = QLA_SUCCESS;
		return rval;
	} else if (IS_QLA8044(ha)) {
		*temp = qla8044_read_temperature(vha);
		rval = QLA_SUCCESS;
		return rval;
	}

	rval = qla2x00_read_asic_temperature(vha, temp);
	return rval;
}

int
qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
	mcp->mb[1] = 1;

	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1016,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
	    "Entered %s.\n", __func__);

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
	mcp->mb[1] = 0;

	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x100c,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
		    "Done %s.\n", __func__);
	}

	return rval;
}

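/*
 * qla82xx_md_get_template_size
 *	Issue the minidump-template mailbox command with the RQST_TMPLT_SIZE
 *	sub-code to learn the size of the firmware minidump template; the
 *	result is stored in ha->md_template_size. A zero size is treated as
 *	a failure.
 */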
int
qla82xx_md_get_template_size(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
	mcp->mb[3] = MSW(RQST_TMPLT_SIZE);

	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	/* Always copy back return mailbox values. */
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1120,
		    "mailbox command FAILED=0x%x, subcode=%x.\n",
		    (mcp->mb[1] << 16) | mcp->mb[0],
		    (mcp->mb[3] << 16) | mcp->mb[2]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
		    "Done %s.\n", __func__);
		ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
		if (!ha->md_template_size) {
			ql_dbg(ql_dbg_mbx, vha, 0x1122,
			    "Null template size obtained.\n");
			rval = QLA_FUNCTION_FAILED;
		}
	}

	return rval;
}

int
qla82xx_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
	    "Entered %s.\n", __func__);

	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	    ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0x1124,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
	mcp->mb[2] = LSW(RQST_TMPLT);
	mcp->mb[3] = MSW(RQST_TMPLT);
	mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
	mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
	mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
	mcp->mb[8] = LSW(ha->md_template_size);
	mcp->mb[9] = MSW(ha->md_template_size);

	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
	    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1125,
		    "mailbox command FAILED=0x%x, subcode=%x.\n",
		    ((mcp->mb[1] << 16) | mcp->mb[0]),
		    ((mcp->mb[3] << 16) | mcp->mb[2]));
	} else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
		    "Done %s.\n", __func__);

	return rval;
}

int
qla8044_md_get_template(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	int rval = QLA_FUNCTION_FAILED;
	int offset = 0, size = MINIDUMP_SIZE_36K;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
	    "Entered %s.\n", __func__);

	ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
	    ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
	if (!ha->md_tmplt_hdr) {
		ql_log(ql_log_warn, vha, 0xb11b,
		    "Unable to allocate memory for Minidump template.\n");
		return rval;
	}

	memset(mcp->mb, 0, sizeof(mcp->mb));
	while (offset < ha->md_template_size) {
		mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
		mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
		mcp->mb[2] = LSW(RQST_TMPLT);
		mcp->mb[3] = MSW(RQST_TMPLT);
		mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
		mcp->mb[8] = LSW(size);
		mcp->mb[9] = MSW(size);

		mcp->mb[10] = offset & 0x0000FFFF;
		mcp->mb[11] = offset & 0xFFFF0000;
		mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
		mcp->tov = MBX_TOV_SECONDS;
		mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
		    MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
		mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
		rval = qla2x00_mailbox_command(vha, mcp);

		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_mbx, vha, 0xb11c,
			    "mailbox command FAILED=0x%x, subcode=%x.\n",
			    ((mcp->mb[1] << 16) | mcp->mb[0]),
			    ((mcp->mb[3] << 16) | mcp->mb[2]));
			return rval;
		} else
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
			    "Done %s.\n", __func__);
		offset = offset + size;
	}

	return rval;
}

int
qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));

	mcp->mb[0] = MBC_SET_LED_CONFIG;
	mcp->mb[1] = led_cfg[0];
	mcp->mb[2] = led_cfg[1];
	if (IS_QLA8031(ha)) {
		mcp->mb[3] = led_cfg[2];
		mcp->mb[4] = led_cfg[3];
		mcp->mb[5] = led_cfg[4];
		mcp->mb[6] = led_cfg[5];
	}

	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA8031(ha))
		mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
	mcp->in_mb = MBX_0;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1134,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));

	mcp->mb[0] = MBC_GET_LED_CONFIG;

	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA8031(ha))
		mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
	mcp->tov = 30;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1137,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		led_cfg[0] = mcp->mb[1];
		led_cfg[1] = mcp->mb[2];
		if (IS_QLA8031(ha)) {
			led_cfg[2] = mcp->mb[3];
			led_cfg[3] = mcp->mb[4];
			led_cfg[4] = mcp->mb[5];
			led_cfg[5] = mcp->mb[6];
		}
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
	    "Entered %s.\n", __func__);

	memset(mcp, 0, sizeof(mbx_cmd_t));
	mcp->mb[0] = MBC_SET_LED_CONFIG;
	if (enable)
		mcp->mb[7] = 0xE;
	else
		mcp->mb[7] = 0xD;

	mcp->out_mb = MBX_7|MBX_0;
	mcp->in_mb = MBX_0;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1128,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_WRITE_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->mb[3] = LSW(data);
	mcp->mb[4] = MSW(data);
	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1131,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
		    "Done %s.\n", __func__);
	}

	return rval;
}

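/*
 * qla2x00_port_logout
 *	Issue an implicit LOGO (MBC_PORT_LOGOUT with BIT_15 set in mb[10])
 *	for the given fcport. Not supported on ISP2100/ISP2200.
 */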
int
qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
		    "Implicit LOGO Unsupported.\n");
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
	    "Entering %s.\n", __func__);

	/* Perform Implicit LOGO. */
	mcp->mb[0] = MBC_PORT_LOGOUT;
	mcp->mb[1] = fcport->loop_id;
	mcp->mb[10] = BIT_15;
	mcp->out_mb = MBX_10|MBX_1|MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x113d,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	else
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
		    "Done %s.\n", __func__);

	return rval;
}

int
qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;
	unsigned long retry_max_time = jiffies + (2 * HZ);

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);

retry_rd_reg:
	mcp->mb[0] = MBC_READ_REMOTE_REG;
	mcp->mb[1] = LSW(reg);
	mcp->mb[2] = MSW(reg);
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114c,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
	} else {
		*data = (mcp->mb[3] | (mcp->mb[4] << 16));
		if (*data == QLA8XXX_BAD_VALUE) {
			/*
			 * During soft-reset CAMRAM register reads might
			 * return 0xbad0bad0. So retry for MAX of 2 sec
			 * while reading camram registers.
			 */
			if (time_after(jiffies, retry_max_time)) {
				ql_dbg(ql_dbg_mbx, vha, 0x1141,
				    "Failure to read CAMRAM register. "
				    "data=0x%x.\n", *data);
				return QLA_FUNCTION_FAILED;
			}
			msleep(100);
			goto retry_rd_reg;
		}
		ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1144,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ha->isp_ops->fw_dump(vha, 0);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
	uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	uint8_t subcode = (uint8_t)options;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA8031(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
	mcp->mb[1] = options;
	mcp->out_mb = MBX_1|MBX_0;
	if (subcode & BIT_2) {
		mcp->mb[2] = LSW(start_addr);
		mcp->mb[3] = MSW(start_addr);
		mcp->mb[4] = LSW(end_addr);
		mcp->mb[5] = MSW(end_addr);
		mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
	}
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (!(subcode & (BIT_2 | BIT_5)))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1147,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
		    mcp->mb[4]);
		ha->isp_ops->fw_dump(vha, 0);
	} else {
		if (subcode & BIT_5)
			*sector_size = mcp->mb[1];
		else if (subcode & (BIT_6 | BIT_7)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1148,
			    "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		} else if (subcode & (BIT_3 | BIT_4)) {
			ql_dbg(ql_dbg_mbx, vha, 0x1149,
			    "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
		}
		ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
	}

	return rval;
}

int
qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
	uint32_t size)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	if (!IS_MCTP_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[4] = MSW(size);
	mcp->mb[5] = LSW(size);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->mb[8] = MSW(addr);
	/* Setting RAM ID to valid */
	/* For MCTP RAM ID is 0x40 */
	mcp->mb[10] = BIT_7 | 0x40;

	mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
	    MBX_0;
	mcp->in_mb = MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x114e,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
		    "Done %s.\n", __func__);
	}

	return rval;
}

int
qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
	void *dd_buf, uint size, uint options)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;
	dma_addr_t dd_dma;

	if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
	    "Entered %s.\n", __func__);

	dd_dma = dma_map_single(&vha->hw->pdev->dev,
	    dd_buf, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
		ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
		return QLA_MEMORY_ALLOC_FAILED;
	}

	memset(dd_buf, 0, size);

	mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
	mcp->mb[1] = options;
	mcp->mb[2] = MSW(LSD(dd_dma));
	mcp->mb[3] = LSW(LSD(dd_dma));
	mcp->mb[6] = MSW(MSD(dd_dma));
	mcp->mb[7] = LSW(MSD(dd_dma));
	mcp->mb[8] = size;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->buf_size = size;
	mcp->flags = MBX_DMA_IN;
	mcp->tov = MBX_TOV_SECONDS * 4;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
	} else {
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
		    "Done %s.\n", __func__);
	}

	dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
	    size, DMA_FROM_DEVICE);

	return rval;
}

static void qla2x00_async_mb_sp_done(void *s, int res)
{
	struct srb *sp = s;

	sp->u.iocb_cmd.u.mbx.rc = res;

	complete(&sp->u.iocb_cmd.u.mbx.comp);
	/* don't free sp here. Let the caller do the free */
}

/*
 * This mailbox uses the iocb interface to send MB command.
 * This allows non-critical (non chip setup) commands to go
 * out in parallel.
 */
int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
{
	int rval = QLA_FUNCTION_FAILED;
	srb_t *sp;
	struct srb_iocb *c;

	if (!vha->hw->flags.fw_started)
		goto done;

	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = mb_to_str(mcp->mb[0]);

	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
	memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);

	c = &sp->u.iocb_cmd;
	c->timeout = qla2x00_async_iocb_timeout;
	init_completion(&c->u.mbx.comp);

	sp->done = qla2x00_async_mb_sp_done;

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1018,
		    "%s: %s Failed submission. %x.\n",
		    __func__, sp->name, rval);
		goto done_free_sp;
	}

	ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
	    sp->name, sp->handle);

	wait_for_completion(&c->u.mbx.comp);
	memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);

	rval = c->u.mbx.rc;
	switch (rval) {
	case QLA_FUNCTION_TIMEOUT:
		ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
		    __func__, sp->name, rval);
		break;
	case QLA_SUCCESS:
		ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
		    __func__, sp->name);
		sp->free(sp);
		break;
	default:
		ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
		    __func__, sp->name, rval);
		sp->free(sp);
		break;
	}

	return rval;

done_free_sp:
	sp->free(sp);
done:
	return rval;
}

/*
 * qla24xx_gpdb_wait
 * NOTE: Do not call this routine from DPC thread
 */
int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	int rval = QLA_FUNCTION_FAILED;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd047,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}
	memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_PORT_DATABASE;
	mc.mb[1] = cpu_to_le16(fcport->loop_id);
	mc.mb[2] = MSW(pd_dma);
	mc.mb[3] = LSW(pd_dma);
	mc.mb[6] = MSW(MSD(pd_dma));
	mc.mb[7] = LSW(MSD(pd_dma));
	mc.mb[9] = cpu_to_le16(vha->vp_idx);
	mc.mb[10] = cpu_to_le16((uint16_t)opt);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1193,
		    "%s: %8phC fail\n", __func__, fcport->port_name);
		goto done_free_sp;
	}

	rval = __qla24xx_parse_gpdb(vha, fcport, pd);

	ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
	    __func__, fcport->port_name);

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);
done:
	return rval;
}

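/*
 * __qla24xx_parse_gpdb
 *	Parse a 24xx port database entry returned by Get Port Database:
 *	verify the login state is PRLI complete, copy the node/port names
 *	and port_id into the fcport, and derive the port type, supported
 *	class of service and confirmed-completion capability from the PRLI
 *	service parameter words.
 */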
int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
    struct port_database_24xx *pd)
{
	int rval = QLA_SUCCESS;
	uint64_t zero = 0;
	u8 current_login_state, last_login_state;

	if (fcport->fc4f_nvme) {
		current_login_state = pd->current_login_state >> 4;
		last_login_state = pd->last_login_state >> 4;
	} else {
		current_login_state = pd->current_login_state & 0xf;
		last_login_state = pd->last_login_state & 0xf;
	}

	/* Check for logged in state. */
	if (current_login_state != PDS_PRLI_COMPLETE) {
		ql_dbg(ql_dbg_mbx, vha, 0x119a,
		    "Unable to verify login-state (%x/%x) for loop_id %x.\n",
		    current_login_state, last_login_state, fcport->loop_id);
		rval = QLA_FUNCTION_FAILED;
		goto gpd_error_out;
	}

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
	     memcmp(fcport->port_name, pd->port_name, 8))) {
		/* We lost the device mid way. */
		rval = QLA_NOT_LOGGED_IN;
		goto gpd_error_out;
	}

	/* Names are little-endian. */
	memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
	memcpy(fcport->port_name, pd->port_name, WWN_SIZE);

	/* Get port_id of device. */
	fcport->d_id.b.domain = pd->port_id[0];
	fcport->d_id.b.area = pd->port_id[1];
	fcport->d_id.b.al_pa = pd->port_id[2];
	fcport->d_id.b.rsvd_1 = 0;

	if (fcport->fc4f_nvme) {
		fcport->nvme_prli_service_param =
		    pd->prli_nvme_svc_param_word_3;
		fcport->port_type = FCT_NVME;
	} else {
		/* If not target must be initiator or unknown type. */
		if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
			fcport->port_type = FCT_INITIATOR;
		else
			fcport->port_type = FCT_TARGET;
	}
	/* Passback COS information. */
	fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
	    FC_COS_CLASS2 : FC_COS_CLASS3;

	if (pd->prli_svc_param_word_3[0] & BIT_7) {
		fcport->flags |= FCF_CONF_COMP_SUPPORTED;
		fcport->conf_compl_supported = 1;
	}

gpd_error_out:
	return rval;
}

/*
 * qla24xx_gidlist_wait
 * NOTE: don't call this routine from DPC thread.
 */
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
	void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}

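/*
 * qla27xx_set_zio_threshold / qla27xx_get_zio_threshold
 *	Use MBC_GET_SET_ZIO_THRESHOLD (mb[1] = 1 to set, 0 to get) to
 *	program or read back the ZIO threshold value carried in mb[2].
 */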
int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(0);
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

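/*
 * qla2x00_read_sfp_dev
 *	Read the full SFP transceiver image (device addresses 0xa0 and
 *	0xa2) in SFP_BLOCK_SIZE chunks into ha->sfp_data, optionally
 *	copying up to count bytes into the caller-supplied buf.
 */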
int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);
			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}