/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_scdb.h"
#include "target_core_ua.h"
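
/*
 * Compile-time debug switches: uncomment one of the #defines below to turn
 * the corresponding DEBUG_*() macros in that area into KERN_INFO printk()s;
 * when left commented out, the macros compile away to nothing.
 */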
/* #define DEBUG_CDB_HANDLER */
#ifdef DEBUG_CDB_HANDLER
#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
#else
#define DEBUG_CDB_H(x...)
#endif

/* #define DEBUG_CMD_MAP */
#ifdef DEBUG_CMD_MAP
#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
#else
#define DEBUG_CMD_M(x...)
#endif

/* #define DEBUG_MEM_ALLOC */
#ifdef DEBUG_MEM_ALLOC
#define DEBUG_MEM(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM(x...)
#endif

/* #define DEBUG_MEM2_ALLOC */
#ifdef DEBUG_MEM2_ALLOC
#define DEBUG_MEM2(x...) printk(KERN_INFO x)
#else
#define DEBUG_MEM2(x...)
#endif

/* #define DEBUG_SG_CALC */
#ifdef DEBUG_SG_CALC
#define DEBUG_SC(x...) printk(KERN_INFO x)
#else
#define DEBUG_SC(x...)
#endif

/* #define DEBUG_SE_OBJ */
#ifdef DEBUG_SE_OBJ
#define DEBUG_SO(x...) printk(KERN_INFO x)
#else
#define DEBUG_SO(x...)
#endif

/* #define DEBUG_CMD_VOL */
#ifdef DEBUG_CMD_VOL
#define DEBUG_VOL(x...) printk(KERN_INFO x)
#else
#define DEBUG_VOL(x...)
#endif

/* #define DEBUG_CMD_STOP */
#ifdef DEBUG_CMD_STOP
#define DEBUG_CS(x...) printk(KERN_INFO x)
#else
#define DEBUG_CS(x...)
#endif

/* #define DEBUG_PASSTHROUGH */
#ifdef DEBUG_PASSTHROUGH
#define DEBUG_PT(x...) printk(KERN_INFO x)
#else
#define DEBUG_PT(x...)
#endif

/* #define DEBUG_TASK_STOP */
#ifdef DEBUG_TASK_STOP
#define DEBUG_TS(x...) printk(KERN_INFO x)
#else
#define DEBUG_TS(x...)
#endif

/* #define DEBUG_TRANSPORT_STOP */
#ifdef DEBUG_TRANSPORT_STOP
#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
#else
#define DEBUG_TRANSPORT_S(x...)
#endif

/* #define DEBUG_TASK_FAILURE */
#ifdef DEBUG_TASK_FAILURE
#define DEBUG_TF(x...) printk(KERN_INFO x)
#else
#define DEBUG_TF(x...)
#endif

/* #define DEBUG_DEV_OFFLINE */
#ifdef DEBUG_DEV_OFFLINE
#define DEBUG_DO(x...) printk(KERN_INFO x)
#else
#define DEBUG_DO(x...)
#endif

/* #define DEBUG_TASK_STATE */
#ifdef DEBUG_TASK_STATE
#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
#else
#define DEBUG_TSTATE(x...)
#endif

/* #define DEBUG_STATUS_THR */
#ifdef DEBUG_STATUS_THR
#define DEBUG_ST(x...) printk(KERN_INFO x)
#else
#define DEBUG_ST(x...)
#endif

/* #define DEBUG_TASK_TIMEOUT */
#ifdef DEBUG_TASK_TIMEOUT
#define DEBUG_TT(x...) printk(KERN_INFO x)
#else
#define DEBUG_TT(x...)
#endif

/* #define DEBUG_GENERIC_REQUEST_FAILURE */
#ifdef DEBUG_GENERIC_REQUEST_FAILURE
#define DEBUG_GRF(x...) printk(KERN_INFO x)
#else
#define DEBUG_GRF(x...)
#endif

/* #define DEBUG_SAM_TASK_ATTRS */
#ifdef DEBUG_SAM_TASK_ATTRS
#define DEBUG_STA(x...) printk(KERN_INFO x)
#else
#define DEBUG_STA(x...)
#endif
struct se_global *se_global;

static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *se_mem_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;

/* Used for transport_dev_get_map_*() */
typedef int (*map_func_t)(struct se_task *, u32);

static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
		unsigned long long starting_lba, u32 sectors,
		enum dma_data_direction data_direction,
		struct list_head *mem_list, int set_counts);
static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
		u32 dma_size);
static int transport_generic_remove(struct se_cmd *cmd,
		int release_to_pool, int session_reinstatement);
static int transport_get_sectors(struct se_cmd *cmd);
static struct list_head *transport_init_se_mem_list(void);
static int transport_map_sg_to_mem(struct se_cmd *cmd,
		struct list_head *se_mem_list, void *in_mem,
		u32 *se_mem_cnt);
static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
		unsigned char *dst, struct list_head *se_mem_list);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);
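
/*
 * Allocate the global target-core context and create the kmem caches used
 * for commands, sessions, TMR requests, unit attentions, memory segments
 * and the PR/ALUA metadata.  On any failure, every cache created so far is
 * destroyed and -1 is returned.
 */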
int init_se_global(void)
{
	struct se_global *global;

	global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
	if (!(global)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
		return -1;
	}

	INIT_LIST_HEAD(&global->g_lu_gps_list);
	INIT_LIST_HEAD(&global->g_se_tpg_list);
	INIT_LIST_HEAD(&global->g_hba_list);
	INIT_LIST_HEAD(&global->g_se_dev_list);
	spin_lock_init(&global->g_device_lock);
	spin_lock_init(&global->hba_lock);
	spin_lock_init(&global->se_tpg_lock);
	spin_lock_init(&global->lu_gps_lock);
	spin_lock_init(&global->plugin_class_lock);

	se_cmd_cache = kmem_cache_create("se_cmd_cache",
			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
	if (!(se_cmd_cache)) {
		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
		goto out;
	}
	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
			0, NULL);
	if (!(se_tmr_req_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
				" failed\n");
		goto out;
	}
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!(se_sess_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!(se_ua_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
		goto out;
	}
	se_mem_cache = kmem_cache_create("se_mem_cache",
			sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
	if (!(se_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
		goto out;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!(t10_pr_reg_cache)) {
		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp),
			__alignof__(struct t10_alua_lu_gp), 0, NULL);
	if (!(t10_alua_lu_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!(t10_alua_lu_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!(t10_alua_tg_pt_gp_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out;
	}
	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(
			"t10_alua_tg_pt_gp_mem_cache",
			sizeof(struct t10_alua_tg_pt_gp_member),
			__alignof__(struct t10_alua_tg_pt_gp_member),
			0, NULL);
	if (!(t10_alua_tg_pt_gp_mem_cache)) {
		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
				"mem_t failed\n");
		goto out;
	}

	se_global = global;

	return 0;
out:
	if (se_cmd_cache)
		kmem_cache_destroy(se_cmd_cache);
	if (se_tmr_req_cache)
		kmem_cache_destroy(se_tmr_req_cache);
	if (se_sess_cache)
		kmem_cache_destroy(se_sess_cache);
	if (se_ua_cache)
		kmem_cache_destroy(se_ua_cache);
	if (se_mem_cache)
		kmem_cache_destroy(se_mem_cache);
	if (t10_pr_reg_cache)
		kmem_cache_destroy(t10_pr_reg_cache);
	if (t10_alua_lu_gp_cache)
		kmem_cache_destroy(t10_alua_lu_gp_cache);
	if (t10_alua_lu_gp_mem_cache)
		kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	if (t10_alua_tg_pt_gp_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	if (t10_alua_tg_pt_gp_mem_cache)
		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	kfree(global);
	return -1;
}
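
/*
 * Release the global context allocated by init_se_global(), destroying
 * all of the kmem caches it created.
 */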
void release_se_global(void)
{
	struct se_global *global;

	global = se_global;
	if (!(global))
		return;

	kmem_cache_destroy(se_cmd_cache);
	kmem_cache_destroy(se_tmr_req_cache);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(se_mem_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
	kfree(global);

	se_global = NULL;
}
/* SCSI statistics table index */
static struct scsi_index_table scsi_index_table;

/*
 * Initialize the index table for allocating unique row indexes to various mib
 * tables.
 */
void init_scsi_index_table(void)
{
	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
	spin_lock_init(&scsi_index_table.lock);
}

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
		printk(KERN_ERR "Invalid index type %d\n", type);
		return -EINVAL;
	}

	spin_lock(&scsi_index_table.lock);
	new_index = ++scsi_index_table.scsi_mib_index[type];
	if (new_index == 0)
		new_index = ++scsi_index_table.scsi_mib_index[type];
	spin_unlock(&scsi_index_table.lock);

	return new_index;
}
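
/*
 * Initialize a struct se_queue_obj: empty queue list, zeroed queue count,
 * and the waitqueue that the processing thread sleeps on.
 */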
void transport_init_queue_obj(struct se_queue_obj *qobj)
{
	atomic_set(&qobj->queue_cnt, 0);
	INIT_LIST_HEAD(&qobj->qobj_list);
	init_waitqueue_head(&qobj->thread_wq);
	spin_lock_init(&qobj->cmd_queue_lock);
}
EXPORT_SYMBOL(transport_init_queue_obj);

static int transport_subsystem_reqmods(void)
{
	int ret;

	ret = request_module("target_core_iblock");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_iblock\n");

	ret = request_module("target_core_file");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_file\n");

	ret = request_module("target_core_pscsi");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_pscsi\n");

	ret = request_module("target_core_stgt");
	if (ret != 0)
		printk(KERN_ERR "Unable to load target_core_stgt\n");

	return 0;
}
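
/*
 * Request the loading of the known TCM subsystem plugins on first use;
 * once g_sub_api_initialized has been set, subsequent calls return
 * immediately.
 */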
int transport_subsystem_check_init(void)
{
	if (se_global->g_sub_api_initialized)
		return 0;
	/*
	 * Request the loading of known TCM subsystem plugins..
	 */
	if (transport_subsystem_reqmods() < 0)
		return -1;

	se_global->g_sub_api_initialized = 1;
	return 0;
}

struct se_session *transport_init_session(void)
{
	struct se_session *se_sess;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!(se_sess)) {
		printk(KERN_ERR "Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);

	return se_sess;
}
EXPORT_SYMBOL(transport_init_session);
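
/*
 * Minimal fabric-module usage sketch (hypothetical caller, error handling
 * trimmed): allocate a session, then bind it to a TPG and node ACL.
 *
 *	struct se_session *se_sess;
 *
 *	se_sess = transport_init_session();
 *	if (IS_ERR(se_sess))
 *		return PTR_ERR(se_sess);
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 */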
/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned char buf[PR_REG_ISID_LEN];

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}
		spin_lock_irq(&se_nacl->nacl_sess_lock);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irq(&se_nacl->nacl_sess_lock);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	spin_lock_bh(&se_tpg->session_lock);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_bh(&se_tpg->session_lock);
}
EXPORT_SYMBOL(transport_register_session);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		list_del(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
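
/*
 * Unhook a session from its portal group and free it.  If the node ACL was
 * dynamically generated and the fabric does not cache demo-mode ACLs, the
 * ACL and its device list are released here as well.
 */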
void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	struct se_node_acl *se_nacl;

	if (!(se_tpg)) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_bh(&se_tpg->session_lock);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_bh(&se_tpg->session_lock);

	/*
	 * Determine if we need to do extra work for this initiator node's
	 * struct se_node_acl if it had been previously dynamically generated.
	 */
	se_nacl = se_sess->se_node_acl;
	if ((se_nacl)) {
		spin_lock_bh(&se_tpg->acl_node_lock);
		if (se_nacl->dynamic_node_acl) {
			if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
					se_tpg))) {
				list_del(&se_nacl->acl_list);
				se_tpg->num_node_acls--;
				spin_unlock_bh(&se_tpg->acl_node_lock);

				core_tpg_wait_for_nacl_pr_ref(se_nacl);
				core_free_device_list_for_node(se_nacl, se_tpg);
				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
						se_nacl);
				spin_lock_bh(&se_tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&se_tpg->acl_node_lock);
	}

	transport_free_session(se_sess);

	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
		TPG_TFO(se_tpg)->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
/*
 * Called with T_TASK(cmd)->t_state_lock held.
 */
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	if (!T_TASK(cmd))
		return;

	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;
		if (!(dev))
			continue;

		if (atomic_read(&task->task_active))
			continue;

		if (!(atomic_read(&task->task_state_active)))
			continue;

		spin_lock_irqsave(&dev->execute_task_lock, flags);
		list_del(&task->t_state_list);
		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
			CMD_TFO(cmd)->get_task_tag(cmd), dev, task);
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		atomic_set(&task->task_state_active, 0);
		atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
	}
}
/* transport_cmd_check_stop():
 *
 * 'transport_off = 1' determines if t_transport_active should be cleared.
 * 'transport_off = 2' determines if task_dev_state should be removed.
 *
 * A non-zero u8 t_state sets cmd->t_state.
 * Returns 1 when command is stopped, else 0.
 */
static int transport_cmd_check_stop(
	struct se_cmd *cmd,
	int transport_off,
	u8 t_state)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * Determine if IOCTL context caller is requesting the stopping of this
	 * command for LUN shutdown purposes.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		return 1;
	}
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
			CMD_TFO(cmd)->get_task_tag(cmd));

		cmd->deferred_t_state = cmd->t_state;
		cmd->t_state = TRANSPORT_DEFERRED_CMD;
		if (transport_off == 2)
			transport_all_task_dev_remove_state(cmd);

		/*
		 * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
		 * to FE.
		 */
		if (transport_off == 2)
			cmd->se_lun = NULL;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&T_TASK(cmd)->t_transport_stop_comp);
		return 1;
	}
	if (transport_off) {
		atomic_set(&T_TASK(cmd)->t_transport_active, 0);
		if (transport_off == 2) {
			transport_all_task_dev_remove_state(cmd);
			/*
			 * Clear struct se_cmd->se_lun before the transport_off == 2
			 * handoff to fabric module.
			 */
			cmd->se_lun = NULL;
			/*
			 * Some fabric modules like tcm_loop can release
			 * their internally allocated I/O reference now and
			 * struct se_cmd now.
			 */
			if (CMD_TFO(cmd)->check_stop_free != NULL) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				CMD_TFO(cmd)->check_stop_free(cmd);
				return 1;
			}
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		return 0;
	} else if (t_state)
		cmd->t_state = t_state;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return 0;
}
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	return transport_cmd_check_stop(cmd, 2, 0);
}
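
/*
 * Drop a command's device state and unlink it from the per-LUN command
 * list once it is no longer active on either.
 */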
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = SE_LUN(cmd);
	unsigned long flags;

	if (!lun)
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto check_lun;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

check_lun:
	spin_lock_irqsave(&lun->lun_cmd_lock, flags);
	if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
		list_del(&cmd->se_lun_list);
		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
#if 0
		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n",
			CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
#endif
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
}
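
/*
 * Abort completion paths: pull the command off the device queue (and, for
 * the non-TMR case, off its LUN), then release it unless a pending stop
 * has already claimed it via transport_cmd_check_stop_to_fabric().
 */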
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
	transport_lun_remove_cmd(cmd);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;
	if (remove)
		transport_generic_remove(cmd, 0, 0);
}

void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	if (transport_cmd_check_stop_to_fabric(cmd))
		return;

	transport_generic_remove(cmd, 0, 0);
}
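
/*
 * Allocate a struct se_queue_req for the command, optionally update
 * cmd->t_state, append the request to the device queue and wake the
 * processing thread.  Returns -1 if the queue request cannot be allocated.
 */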
static int transport_add_cmd_to_queue(
	struct se_cmd *cmd,
	int t_state)
{
	struct se_device *dev = cmd->se_dev;
	struct se_queue_obj *qobj = dev->dev_queue_obj;
	struct se_queue_req *qr;
	unsigned long flags;

	qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
	if (!(qr)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct se_queue_req\n");
		return -1;
	}
	INIT_LIST_HEAD(&qr->qr_list);

	qr->cmd = (void *)cmd;
	qr->state = t_state;

	if (t_state) {
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
		cmd->t_state = t_state;
		atomic_set(&T_TASK(cmd)->t_transport_active, 1);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	}

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	list_add_tail(&qr->qr_list, &qobj->qobj_list);
	atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	atomic_inc(&qobj->queue_cnt);
	wake_up_interruptible(&qobj->thread_wq);
	return 0;
}
/*
 * Called with struct se_queue_obj->cmd_queue_lock held.
 */
static struct se_queue_req *
__transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr = NULL;

	if (list_empty(&qobj->qobj_list))
		return NULL;

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);

	return qr;
}
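
/*
 * Locking variant of the above: takes cmd_queue_lock itself before
 * dequeueing the first struct se_queue_req.
 */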
static struct se_queue_req *
transport_get_qr_from_queue(struct se_queue_obj *qobj)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (list_empty(&qobj->qobj_list)) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return NULL;
	}

	list_for_each_entry(qr, &qobj->qobj_list, qr_list)
		break;

	if (qr->cmd) {
		cmd = (struct se_cmd *)qr->cmd;
		atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
	}
	list_del(&qr->qr_list);
	atomic_dec(&qobj->queue_cnt);
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	return qr;
}
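
/*
 * Remove every queue request for @cmd from @qobj, dropping the per-command
 * and per-queue counters for each one removed.
 */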
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
		struct se_queue_obj *qobj)
{
	struct se_cmd *q_cmd;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	unsigned long flags;

	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return;
	}

	list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
		q_cmd = (struct se_cmd *)qr->cmd;
		if (q_cmd != cmd)
			continue;

		atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
		atomic_dec(&qobj->queue_cnt);
		list_del(&qr->qr_list);
		kfree(qr);
	}
	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);

	if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			atomic_read(&T_TASK(cmd)->t_transport_queue_active));
	}
}
/*
 * Completion function used by TCM subsystem plugins (such as FILEIO)
 * for queueing up response from struct se_subsystem_api->do_task()
 */
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
	struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
				struct se_task, t_list);

	if (good) {
		cmd->scsi_status = SAM_STAT_GOOD;
		task->task_scsi_status = GOOD;
	} else {
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
		TASK_CMD(task)->transport_error_status =
					PYX_TRANSPORT_ILLEGAL_REQUEST;
	}

	transport_complete_task(task, good);
}
EXPORT_SYMBOL(transport_complete_sync_cache);
/* transport_complete_task():
 *
 * Called from interrupt and non interrupt context depending
 * on the transport plugin.
 */
void transport_complete_task(struct se_task *task, int success)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = task->se_dev;
	int t_state;
	unsigned long flags;
#if 0
	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
		T_TASK(cmd)->t_task_cdb[0], dev);
#endif
	if (dev) {
		spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
		atomic_inc(&dev->depth_left);
		atomic_inc(&SE_HBA(dev)->left_queue_depth);
		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
	}

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&task->task_active, 0);

	/*
	 * See if any sense data exists, if so set the TASK_SENSE flag.
	 * Also check for any other post completion work that needs to be
	 * done by the plugins.
	 */
	if (dev && dev->transport->transport_complete) {
		if (dev->transport->transport_complete(task) != 0) {
			cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
			task->task_sense = 1;
			success = 1;
		}
	}

	/*
	 * See if we are waiting for outstanding struct se_task
	 * to complete for an exception condition
	 */
	if (atomic_read(&task->task_stop)) {
		/*
		 * Decrement T_TASK(cmd)->t_se_count if this task had
		 * previously thrown its timeout exception handler.
		 */
		if (atomic_read(&task->task_timeout)) {
			atomic_dec(&T_TASK(cmd)->t_se_count);
			atomic_set(&task->task_timeout, 0);
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		complete(&task->task_stop_comp);
		return;
	}
	/*
	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout
	 * left counter to determine when the struct se_cmd is ready to be queued to
	 * the processing thread.
	 */
	if (atomic_read(&task->task_timeout)) {
		if (!(atomic_dec_and_test(
				&T_TASK(cmd)->t_task_cdbs_timeout_left))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
				flags);
			return;
		}
		t_state = TRANSPORT_COMPLETE_TIMEOUT;
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		transport_add_cmd_to_queue(cmd, t_state);
		return;
	}
	atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);

	/*
	 * Decrement the outstanding t_task_cdbs_left count.  The last
	 * struct se_task of a struct se_cmd queues the command to the
	 * processing thread, with t_state depending upon int success.
	 */
	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
		if (!success)
			T_TASK(cmd)->t_tasks_failed = 1;

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	if (!success || T_TASK(cmd)->t_tasks_failed) {
		t_state = TRANSPORT_COMPLETE_FAILURE;
		if (!task->task_error_status) {
			task->task_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
			cmd->transport_error_status =
				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		}
	} else {
		atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
		t_state = TRANSPORT_COMPLETE_OK;
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, t_state);
}
EXPORT_SYMBOL(transport_complete_task);

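/*
 * Illustrative backend sketch (an assumption, not code from this file):
 * a subsystem plugin's ->do_task() that finishes synchronously would
 * mark the task's SCSI status and hand it back via the function above,
 * returning 0 so that __transport_execute_tasks() treats the dispatch
 * as successful:
 *
 *	static int my_do_task(struct se_task *task)	// hypothetical
 *	{
 *		...
 *		task->task_scsi_status = GOOD;
 *		transport_complete_task(task, 1);
 *		return 0;
 *	}
 */
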
/*
 * Called by transport_add_tasks_from_cmd() once a struct se_cmd's
 * struct se_task list is ready to be added to the active execution list
 * of a struct se_device.
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static inline int transport_add_task_check_sam_attr(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	/*
	 * No SAM Task attribute emulation enabled, add to tail of
	 * execution queue
	 */
	if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED) {
		list_add_tail(&task->t_execute_list, &dev->execute_task_list);
		return 0;
	}
	/*
	 * HEAD_OF_QUEUE attribute for received CDB, which means
	 * the first task that is associated with a struct se_cmd goes to
	 * the head of the struct se_device->execute_task_list, and each
	 * subsequent task is inserted after task_prev.
	 */
	if (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG) {
		list_add(&task->t_execute_list,
				(task_prev != NULL) ?
				&task_prev->t_execute_list :
				&dev->execute_task_list);
		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
				" in execution queue\n",
				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
		return 1;
	}
	/*
	 * ORDERED, SIMPLE or UNTAGGED attribute tasks, once they have been
	 * transitioned from Dormant -> Active state, are added to the end
	 * of the struct se_device->execute_task_list.
	 */
	list_add_tail(&task->t_execute_list, &dev->execute_task_list);
	return 0;
}

/* __transport_add_task_to_execute_queue():
 *
 * Called with se_dev_t->execute_task_lock held.
 */
static void __transport_add_task_to_execute_queue(
	struct se_task *task,
	struct se_task *task_prev,
	struct se_device *dev)
{
	int head_of_queue;

	head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
	atomic_inc(&dev->execute_tasks);

	if (atomic_read(&task->task_state_active))
		return;
	/*
	 * Determine if this task needs to go to HEAD_OF_QUEUE for the
	 * state list as well.  Running without SAM Task Attribute
	 * emulation will always return head_of_queue == 0 here.
	 */
	if (head_of_queue)
		list_add(&task->t_state_list, (task_prev) ?
				&task_prev->t_state_list :
				&dev->state_task_list);
	else
		list_add_tail(&task->t_state_list, &dev->state_task_list);
	atomic_set(&task->task_state_active, 1);

	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
		CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
		task, dev);
}

static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
{
	struct se_device *dev;
	struct se_task *task;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		dev = task->se_dev;

		if (atomic_read(&task->task_state_active))
			continue;

		spin_lock(&dev->execute_task_lock);
		list_add_tail(&task->t_state_list, &dev->state_task_list);
		atomic_set(&task->task_state_active, 1);

		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
			CMD_TFO(task->task_se_cmd)->get_task_tag(
			task->task_se_cmd), task, dev);

		spin_unlock(&dev->execute_task_lock);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}

static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_task *task, *task_prev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		if (atomic_read(&task->task_execute_queue))
			continue;
		/*
		 * __transport_add_task_to_execute_queue() handles the
		 * SAM Task Attribute emulation if enabled
		 */
		__transport_add_task_to_execute_queue(task, task_prev, dev);
		atomic_set(&task->task_execute_queue, 1);
		task_prev = task;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/* transport_get_task_from_execute_queue():
 *
 * Called with dev->execute_task_lock held.
 */
static struct se_task *
transport_get_task_from_execute_queue(struct se_device *dev)
{
	struct se_task *task;

	if (list_empty(&dev->execute_task_list))
		return NULL;

	task = list_first_entry(&dev->execute_task_list,
				struct se_task, t_execute_list);
	list_del(&task->t_execute_list);
	atomic_set(&task->task_execute_queue, 0);
	atomic_dec(&dev->execute_tasks);

	return task;
}

/* transport_remove_task_from_execute_queue():
 *
 * Remove a struct se_task from the execution queue of its struct se_device.
 */
void transport_remove_task_from_execute_queue(
	struct se_task *task,
	struct se_device *dev)
{
	unsigned long flags;

	if (atomic_read(&task->task_execute_queue) == 0) {
		dump_stack();
		return;
	}

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	list_del(&task->t_execute_list);
	atomic_set(&task->task_execute_queue, 0);
	atomic_dec(&dev->execute_tasks);
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

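/*
 * Illustrative usage (an assumption, not a call site in this file): the
 * returned string is intended to be embedded directly in log output:
 *
 *	printk(KERN_INFO "ITT: 0x%08x direction: %s\n",
 *		CMD_TFO(cmd)->get_task_tag(cmd),
 *		transport_dump_cmd_direction(cmd));
 */
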
void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	switch (dev->dev_status) {
	case TRANSPORT_DEVICE_ACTIVATED:
		*bl += sprintf(b + *bl, "ACTIVATED");
		break;
	case TRANSPORT_DEVICE_DEACTIVATED:
		*bl += sprintf(b + *bl, "DEACTIVATED");
		break;
	case TRANSPORT_DEVICE_SHUTDOWN:
		*bl += sprintf(b + *bl, "SHUTDOWN");
		break;
	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
		*bl += sprintf(b + *bl, "OFFLINE");
		break;
	default:
		*bl += sprintf(b + *bl, "UNKNOWN=%d", dev->dev_status);
		break;
	}

	*bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
		atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
		dev->queue_depth);
	*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
		DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
	*bl += sprintf(b + *bl, " ");
}

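/*
 * Example output (illustrative values only), as assembled by the
 * sprintf() format strings above:
 *
 *	Status: ACTIVATED Execute/Left/Max Queue Depth: 0/32/32
 *	SectorSize: 512 MaxSectors: 1024
 */
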
/* transport_release_all_cmds():
 *
 * Drain any remaining struct se_cmd descriptors from the device queue
 * object and release them directly.
 */
static void transport_release_all_cmds(struct se_device *dev)
{
	struct se_cmd *cmd = NULL;
	struct se_queue_req *qr = NULL, *qr_p = NULL;
	int bug_out = 0, t_state;
	unsigned long flags;

	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
				qr_list) {
		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		list_del(&qr->qr_list);
		kfree(qr);
		spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
				flags);

		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
			" t_state: %u directly\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			CMD_TFO(cmd)->get_cmd_state(cmd), t_state);

		transport_release_fe_cmd(cmd);
		bug_out = 1;

		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
#if 0
	if (bug_out)
		BUG();
#endif
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk(KERN_INFO "%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set.
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

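/*
 * Worked example: for a designation descriptor with page_83[0] = 0x62
 * and page_83[1] = 0x93 (illustrative bytes, not from a real device),
 * bit 7 of byte 1 (0x80) is set, so PIV is valid, and the upper nibble
 * of byte 0 yields vpd->protocol_identifier = 0x60, which the dump
 * routine above reports as "SAS Serial SCSI Protocol".
 */
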
int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0, len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0, len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk("%s", buf);

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

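/*
 * Worked example: byte 1 of a designation descriptor packs both fields
 * parsed above.  With page_83[1] = 0x93 (illustrative value):
 *
 *	association = 0x93 & 0x30 = 0x10  ->  "target port"
 *	type        = 0x93 & 0x0f = 0x03  ->  "NAA"
 */
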
int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		sprintf(buf, "T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		sprintf(buf, "T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		sprintf(buf, "T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -1;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		printk("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

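/*
 * Worked example for the binary (0x01) code set above: with an NAA
 * designator of length page_83[3] = 8 whose first two payload bytes are
 * page_83[4] = 0x60 and page_83[5] = 0x01 (illustrative values), the
 * loop expands each byte into two hex characters, producing a
 * device_identifier string that begins with "3" (the NAA type nibble
 * from hex_str[device_identifier_type]) followed by "6001...".
 */
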
static void core_setup_task_attr_emulation(struct se_device *dev)
{
	/*
	 * If this device is from Target_Core_Mod/pSCSI, disable the
	 * SAM Task Attribute emulation.
	 *
	 * This is currently not available in upstream Linux/SCSI Target
	 * mode code, and is assumed to be disabled while using TCM/pSCSI.
	 */
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
		return;
	}

	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
		" device\n", TRANSPORT(dev)->name,
		TRANSPORT(dev)->get_device_rev(dev));
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = DEV_T10_WWN(dev);
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	printk(" Vendor: ");
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			printk("%c", wwn->vendor[i]);
		else
			printk(" ");

	printk(" Model: ");
	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			printk("%c", wwn->model[i]);
		else
			printk(" ");

	printk(" Revision: ");
	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			printk("%c", wwn->revision[i]);
		else
			printk(" ");

	printk("\n");

	device_type = TRANSPORT(dev)->get_device_type(dev);
	printk(" Type:   %s ", scsi_device_type(device_type));
	printk(" ANSI SCSI revision: %02x\n",
		TRANSPORT(dev)->get_device_rev(dev));
}

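/*
 * Example ring-buffer output (illustrative values and approximate
 * padding; the "LIO-ORG" vendor const is set in
 * transport_add_device_to_core_hba() below):
 *
 *	 Vendor: LIO-ORG  Model: FILEIO  Revision: 4.0
 *	 Type:   Direct-Access  ANSI SCSI revision: 05
 */
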
struct se_device *transport_add_device_to_core_hba(
	struct se_hba *hba,
	struct se_subsystem_api *transport,
	struct se_subsystem_dev *se_dev,
	u32 device_flags,
	void *transport_dev,
	struct se_dev_limits *dev_limits,
	const char *inquiry_prod,
	const char *inquiry_rev)
{
	int force_pt;
	struct se_device *dev;

	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
	if (!(dev)) {
		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
		return NULL;
	}
	dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
	if (!(dev->dev_queue_obj)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" dev->dev_queue_obj\n");
		kfree(dev);
		return NULL;
	}
	transport_init_queue_obj(dev->dev_queue_obj);

	dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
					GFP_KERNEL);
	if (!(dev->dev_status_queue_obj)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" dev->dev_status_queue_obj\n");
		kfree(dev->dev_queue_obj);
		kfree(dev);
		return NULL;
	}
	transport_init_queue_obj(dev->dev_status_queue_obj);

	dev->dev_flags = device_flags;
	dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
	dev->dev_ptr = (void *) transport_dev;
	dev->se_hba = hba;
	dev->se_sub_dev = se_dev;
	dev->transport = transport;
	atomic_set(&dev->active_cmds, 0);
	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->execute_task_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->ordered_cmd_list);
	INIT_LIST_HEAD(&dev->state_task_list);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->ordered_cmd_lock);
	spin_lock_init(&dev->state_task_lock);
	spin_lock_init(&dev->dev_alua_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->dev_status_lock);
	spin_lock_init(&dev->dev_status_thr_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);

	dev->queue_depth = dev_limits->queue_depth;
	atomic_set(&dev->depth_left, dev->queue_depth);
	atomic_set(&dev->dev_ordered_id, 0);

	se_dev_set_default_attribs(dev, dev_limits);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();
	spin_lock_init(&dev->stats_lock);

	spin_lock(&hba->device_lock);
	list_add_tail(&dev->dev_list, &hba->hba_dev_list);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);
	/*
	 * Setup the SAM Task Attribute emulation for struct se_device
	 */
	core_setup_task_attr_emulation(dev);
	/*
	 * Force PR and ALUA passthrough emulation with internal object use.
	 */
	force_pt = (hba->hba_flags & HBA_FLAGS_INTERNAL_USE);
	/*
	 * Setup the Reservations infrastructure for struct se_device
	 */
	core_setup_reservations(dev, force_pt);
	/*
	 * Setup the Asymmetric Logical Unit Assignment for struct se_device
	 */
	if (core_setup_alua(dev, force_pt) < 0)
		goto out;
	/*
	 * Startup the struct se_device processing thread
	 */
	dev->process_thread = kthread_run(transport_processing_thread, dev,
					  "LIO_%s", TRANSPORT(dev)->name);
	if (IS_ERR(dev->process_thread)) {
		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
			TRANSPORT(dev)->name);
		goto out;
	}
	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 * This is required so that transport_get_inquiry() copies these
	 * originals once back into DEV_T10_WWN(dev) for the virtual device
	 * setup.
	 */
	if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (!inquiry_prod || !inquiry_rev) {
			printk(KERN_ERR "All non TCM/pSCSI plugins require"
				" INQUIRY consts\n");
			goto out;
		}

		strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
		strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
		strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
	}
	scsi_dump_inquiry(dev);

	return dev;
out:
	/*
	 * The processing thread may not have been started yet, or
	 * kthread_run() may have returned an ERR_PTR(), so only stop a
	 * live thread here.
	 */
	if (dev->process_thread && !IS_ERR(dev->process_thread))
		kthread_stop(dev->process_thread);

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	se_release_vpd_for_dev(dev);

	kfree(dev->dev_status_queue_obj);
	kfree(dev->dev_queue_obj);
	kfree(dev);

	return NULL;
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);

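/*
 * Illustrative backend registration sketch (names and values are
 * assumptions, not from this file): a subsystem plugin's device
 * creation path would hand its private state to the core like:
 *
 *	dev = transport_add_device_to_core_hba(hba, &my_subsystem_template,
 *			se_dev, 0, my_dev_private, &dev_limits,
 *			"MYDISK", "1.0");
 *	if (!dev)
 *		... unwind and fail the device creation ...
 */
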
/* transport_generic_prepare_cdb():
 *
 * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
 * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
 * The point of this is that since we are mapping iSCSI LUNs to
 * SCSI Target IDs, a non-zero LUN in the CDB would confuse the
 * devices and HBAs.
 */
static inline void transport_generic_prepare_cdb(
	unsigned char *cdb)
{
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}
}

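/*
 * Worked example: a legacy initiator sending TEST_UNIT_READY to LUN 2
 * in the SAM-2 style would set cdb[1] = 0x40 (LUN in bits 7-5).  Since
 * TEST_UNIT_READY falls into the default case above, cdb[1] &= 0x1f
 * clears the top three bits and leaves cdb[1] = 0x00.
 */
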
static struct se_task *
transport_generic_get_task(struct se_cmd *cmd,
		enum dma_data_direction data_direction)
{
	struct se_task *task;
	struct se_device *dev = SE_DEV(cmd);
	unsigned long flags;

	task = dev->transport->alloc_task(cmd);
	if (!task) {
		printk(KERN_ERR "Unable to allocate struct se_task\n");
		return NULL;
	}

	INIT_LIST_HEAD(&task->t_list);
	INIT_LIST_HEAD(&task->t_execute_list);
	INIT_LIST_HEAD(&task->t_state_list);
	init_completion(&task->task_stop_comp);
	task->task_no = T_TASK(cmd)->t_tasks_no++;
	task->task_se_cmd = cmd;
	task->se_dev = dev;
	task->task_data_direction = data_direction;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return task;
}

static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);

void transport_device_setup_cmd(struct se_cmd *cmd)
{
	cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
}
EXPORT_SYMBOL(transport_device_setup_cmd);

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_lun_list);
	INIT_LIST_HEAD(&cmd->se_delayed_list);
	INIT_LIST_HEAD(&cmd->se_ordered_list);
	/*
	 * Setup t_task pointer to t_task_backstore
	 */
	cmd->t_task = &cmd->t_task_backstore;
	INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
	init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
	init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
	init_completion(&T_TASK(cmd)->t_transport_stop_comp);
	spin_lock_init(&T_TASK(cmd)->t_state_lock);
	atomic_set(&T_TASK(cmd)->transport_dev_active, 1);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
}
EXPORT_SYMBOL(transport_init_se_cmd);

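/*
 * Illustrative fabric-side usage (names are assumptions): a fabric
 * module embedding a struct se_cmd in its per-I/O descriptor would
 * initialize it along the lines of:
 *
 *	transport_init_se_cmd(&ioreq->se_cmd, &my_fabric_ops, se_sess,
 *			scsi_data_length, DMA_FROM_DEVICE,
 *			MSG_SIMPLE_TAG, &ioreq->sense_buf[0]);
 */
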
static int transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 0;

	if (cmd->sam_task_attr == MSG_ACA_TAG) {
		DEBUG_STA("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return -1;
	}
	/*
	 * Used to determine when ORDERED commands should go from
	 * Dormant to Active status.
	 */
	cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
	smp_mb__after_atomic_inc();
	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
			cmd->se_ordered_id, cmd->sam_task_attr,
			TRANSPORT(cmd->se_dev)->name);
	return 0;
}

void transport_free_se_cmd(
	struct se_cmd *se_cmd)
{
	if (se_cmd->se_tmr_req)
		core_tmr_release_req(se_cmd->se_tmr_req);
	/*
	 * Check and free any extended CDB buffer that was allocated
	 */
	if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
		kfree(T_TASK(se_cmd)->t_task_cdb);
}
EXPORT_SYMBOL(transport_free_se_cmd);

static void transport_generic_wait_for_tasks(struct se_cmd *, int, int);

/* transport_generic_allocate_tasks():
 *
 * Called from fabric RX Thread.
 */
int transport_generic_allocate_tasks(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	int ret;

	transport_generic_prepare_cdb(cdb);

	/*
	 * This is needed for early exceptions.
	 */
	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;

	transport_device_setup_cmd(cmd);
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return -1;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.  Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
		T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!(T_TASK(cmd)->t_task_cdb)) {
			printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
				" %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
			return -1;
		}
	} else {
		T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
	}
	/*
	 * Copy the original CDB into T_TASK(cmd).
	 */
	memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
	/*
	 * Setup the received CDB based on SCSI defined opcodes and
	 * perform unit attention, persistent reservations and ALUA
	 * checks for virtual device backends.  The T_TASK(cmd)->t_task_cdb
	 * pointer is expected to be setup before we reach this point.
	 */
	ret = transport_generic_cmd_sequencer(cmd, cdb);
	if (ret < 0)
		return ret;
	/*
	 * Check for SAM Task Attribute Emulation
	 */
	if (transport_check_alloc_task_attr(cmd) < 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		return -2;
	}
	spin_lock(&cmd->se_lun->lun_sep_lock);
	if (cmd->se_lun->lun_sep)
		cmd->se_lun->lun_sep->sep_stats.cmd_pdus++;
	spin_unlock(&cmd->se_lun->lun_sep_lock);
	return 0;
}
EXPORT_SYMBOL(transport_generic_allocate_tasks);

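/*
 * Illustrative fabric call sequence (an assumption; exact flow varies
 * by fabric module): after transport_init_se_cmd(), a fabric RX path
 * would typically do:
 *
 *	if (transport_generic_allocate_tasks(se_cmd, cdb) < 0)
 *		... queue CHECK_CONDITION back to the initiator ...
 *	else
 *		transport_generic_handle_cdb(se_cmd);
 */
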
/*
 * Used by fabric module frontends not defining a TFO->new_cmd_map()
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD status
 */
int transport_generic_handle_cdb(
	struct se_cmd *cmd)
{
	if (!SE_LUN(cmd)) {
		dump_stack();
		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
		return -1;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb);

/*
 * Used by fabric module frontends defining a TFO->new_cmd_map() caller
 * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
 * complete setup in TCM process context w/ TFO->new_cmd_map().
 */
int transport_generic_handle_cdb_map(
	struct se_cmd *cmd)
{
	if (!SE_LUN(cmd)) {
		dump_stack();
		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
		return -1;
	}

	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_cdb_map);

/* transport_generic_handle_data():
 *
 * Queue a command whose DATA OUT phase has completed to the processing
 * thread for WRITE execution.
 */
int transport_generic_handle_data(
	struct se_cmd *cmd)
{
	/*
	 * For the software fabric case, we assume the nexus is being
	 * failed/shutdown when signals are pending from the kthread context
	 * caller, so we return a failure.  For the HW target mode case running
	 * in interrupt code, the signal_pending() check is skipped.
	 */
	if (!in_interrupt() && signal_pending(current))
		return -1;
	/*
	 * If the received CDB has already been ABORTED by the generic
	 * target engine, we now call transport_check_aborted_status()
	 * to queue any delayed TASK_ABORTED status for the received CDB to the
	 * fabric module as we are expecting no further incoming DATA OUT
	 * sequences at this point.
	 */
	if (transport_check_aborted_status(cmd, 1) != 0)
		return 0;

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_data);

/* transport_generic_handle_tmr():
 *
 * Queue a received task management request to the processing thread.
 */
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	/*
	 * This is needed for early exceptions.
	 */
	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
	transport_device_setup_cmd(cmd);

	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

void transport_generic_free_cmd_intr(
	struct se_cmd *cmd)
{
	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR);
}
EXPORT_SYMBOL(transport_generic_free_cmd_intr);

static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;
	int ret = 0;

	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
		CMD_TFO(cmd)->get_task_tag(cmd));

	/*
	 * No tasks remain in the execution queue
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&T_TASK(cmd)->t_task_list, t_list) {
		DEBUG_TS("task_no[%d] - Processing task %p\n",
				task->task_no, task);
		/*
		 * If the struct se_task has not been sent and is not active,
		 * remove the struct se_task from the execution queue.
		 */
		if (!atomic_read(&task->task_sent) &&
		    !atomic_read(&task->task_active)) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);
			transport_remove_task_from_execute_queue(task,
					task->se_dev);

			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
				task->task_no);
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			continue;
		}

		/*
		 * If the struct se_task is active, sleep until it is returned
		 * from the plugin.
		 */
		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);

			DEBUG_TS("task_no[%d] - Waiting to complete\n",
				task->task_no);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_TS("task_no[%d] - Stopped successfully\n",
				task->task_no);

			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
			ret++;
		}

		__transport_stop_task_timer(task, &flags);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return ret;
}

static void transport_failure_reset_queue_depth(struct se_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
	atomic_inc(&dev->depth_left);
	atomic_inc(&SE_HBA(dev)->left_queue_depth);
	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
}

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
static void transport_generic_request_failure(
	struct se_cmd *cmd,
	struct se_device *dev,
	int complete,
	int sc)
{
	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
		" CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
		T_TASK(cmd)->t_task_cdb[0]);
	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
		" %d/%d transport_error_status: %d\n",
		CMD_TFO(cmd)->get_cmd_state(cmd),
		cmd->t_state, cmd->deferred_t_state,
		cmd->transport_error_status);
	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
		" t_transport_active: %d t_transport_stop: %d"
		" t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
		atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
		atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
		atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
		atomic_read(&T_TASK(cmd)->t_transport_active),
		atomic_read(&T_TASK(cmd)->t_transport_stop),
		atomic_read(&T_TASK(cmd)->t_transport_sent));

	transport_stop_all_task_timers(cmd);

	if (dev)
		transport_failure_reset_queue_depth(dev);
	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);

	if (complete) {
		transport_direct_request_timeout(cmd);
		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	switch (cmd->transport_error_status) {
	case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
		break;
	case PYX_TRANSPORT_INVALID_CDB_FIELD:
		cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
		break;
	case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		break;
	case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
		if (!sc)
			transport_new_cmd_failure(cmd);
		/*
		 * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
		 * we force this session to fall back to session
		 * recovery.
		 */
		CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
		CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);

		goto check_stop;
	case PYX_TRANSPORT_LU_COMM_FAILURE:
	case PYX_TRANSPORT_ILLEGAL_REQUEST:
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		break;
	case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
		cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
		break;
	case PYX_TRANSPORT_WRITE_PROTECTED:
		cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
		break;
	case PYX_TRANSPORT_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (SE_SESS(cmd) &&
		    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
			core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
				cmd->orig_fe_lun, 0x2C,
				ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);

		CMD_TFO(cmd)->queue_status(cmd);
		goto check_stop;
	case PYX_TRANSPORT_USE_SENSE_REASON:
		/*
		 * struct se_cmd->scsi_sense_reason already set
		 */
		break;
	default:
		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
			T_TASK(cmd)->t_task_cdb[0],
			cmd->transport_error_status);
		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	if (!sc)
		transport_new_cmd_failure(cmd);
	else
		transport_send_check_condition_and_sense(cmd,
			cmd->scsi_sense_reason, 0);
check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}

static void transport_direct_request_timeout(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}
	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
		   &T_TASK(cmd)->t_se_count);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}

static void transport_generic_request_timeout(struct se_cmd *cmd)
{
	unsigned long flags;

	/*
	 * Reset T_TASK(cmd)->t_se_count so that the final call to
	 * transport_generic_remove() can free the memory resources.
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
		int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);

		atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_generic_remove(cmd, 0, 0);
}

static int
transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
{
	unsigned char *buf;

	buf = kzalloc(data_length, GFP_KERNEL);
	if (!(buf)) {
		printk(KERN_ERR "Unable to allocate memory for buffer\n");
		return -1;
	}

	T_TASK(cmd)->t_tasks_se_num = 0;
	T_TASK(cmd)->t_task_buf = buf;

	return 0;
}

static inline u32 transport_lba_21(unsigned char *cdb)
{
	return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
}

static inline u32 transport_lba_32(unsigned char *cdb)
{
	return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
}

static inline unsigned long long transport_lba_64(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
	__v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

/*
 * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
 */
static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
{
	unsigned int __v1, __v2;

	__v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
	__v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];

	return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
}

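/*
 * Worked example for transport_lba_64(): a READ_16 CDB whose bytes 2-9
 * are 00 00 00 01 00 00 00 08 yields __v1 = 0x00000001 (upper 32 bits)
 * and __v2 = 0x00000008 (lower 32 bits), so the assembled LBA is
 * (0x1ULL << 32) | 0x8 = 0x100000008.
 */
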
static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
}

/*
 * Called from interrupt context.
 */
static void transport_task_timeout_handler(unsigned long data)
{
	struct se_task *task = (struct se_task *)data;
	struct se_cmd *cmd = TASK_CMD(task);
	unsigned long flags;

	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (task->task_flags & TF_STOP) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}
	task->task_flags &= ~TF_RUNNING;

	/*
	 * Determine if transport_complete_task() has already been called.
	 */
	if (!(atomic_read(&task->task_active))) {
		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
			" == 0\n", task, cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}

	atomic_inc(&T_TASK(cmd)->t_se_count);
	atomic_inc(&T_TASK(cmd)->t_transport_timeout);
	T_TASK(cmd)->t_tasks_failed = 1;

	atomic_set(&task->task_timeout, 1);
	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
	task->task_scsi_status = 1;

	if (atomic_read(&task->task_stop)) {
		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
			" == 1\n", task, cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		complete(&task->task_stop_comp);
		return;
	}

	if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
			" t_task_cdbs_left\n", task, cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return;
	}
	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
		task, cmd);

	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
}

/*
 * Called with T_TASK(cmd)->t_state_lock held.
 */
static void transport_start_task_timer(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	int timeout;

	if (task->task_flags & TF_RUNNING)
		return;
	/*
	 * If the task_timeout is disabled, exit now.
	 */
	timeout = DEV_ATTRIB(dev)->task_timeout;
	if (!(timeout))
		return;

	init_timer(&task->task_timer);
	task->task_timer.expires = (get_jiffies_64() + timeout * HZ);
	task->task_timer.data = (unsigned long) task;
	task->task_timer.function = transport_task_timeout_handler;

	task->task_flags |= TF_RUNNING;
	add_timer(&task->task_timer);
#if 0
	printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
		" %d\n", task->task_se_cmd, task, timeout);
#endif
}

/*
 * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
 */
void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
{
	struct se_cmd *cmd = TASK_CMD(task);

	if (!(task->task_flags & TF_RUNNING))
		return;

	task->task_flags |= TF_STOP;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);

	del_timer_sync(&task->task_timer);

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
	task->task_flags &= ~TF_RUNNING;
	task->task_flags &= ~TF_STOP;
}

static void transport_stop_all_task_timers(struct se_cmd *cmd)
{
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&T_TASK(cmd)->t_task_list, t_list)
		__transport_stop_task_timer(task, &flags);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}

static inline int transport_tcq_window_closed(struct se_device *dev)
{
	if (dev->dev_tcq_window_closed++ <
	    PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD)
		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
	else
		msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);

	wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
	return 0;
}

/*
 * Called from Fabric Module context from transport_execute_tasks()
 *
 * The return of this function determines if the tasks from struct se_cmd
 * get added to the execution queue in transport_execute_tasks(),
 * or are added to the delayed or ordered lists here.
 */
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
	if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
		return 1;
	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * so that the tasks of the passed struct se_cmd are added to the
	 * front of the execution list.
	 */
	if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
		smp_mb__after_atomic_inc();
		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
			" 0x%02x, se_ordered_id: %u\n",
			T_TASK(cmd)->t_task_cdb[0],
			cmd->se_ordered_id);
		return 1;
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
		list_add_tail(&cmd->se_ordered_list,
				&SE_DEV(cmd)->ordered_cmd_list);
		spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);

		atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
		smp_mb__after_atomic_inc();

		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
			" list, se_ordered_id: %u\n",
			T_TASK(cmd)->t_task_cdb[0],
			cmd->se_ordered_id);
		/*
		 * Add ORDERED command to tail of execution queue if
		 * no other older commands exist that need to be
		 * completed first.
		 */
		if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
			return 1;
	} else {
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc(&SE_DEV(cmd)->simple_cmds);
		smp_mb__after_atomic_inc();
	}
	/*
	 * If one or more outstanding ORDERED task attributes exist, add
	 * the dormant task(s) built for the passed struct se_cmd to the
	 * delayed cmd queue; they will be drained from there, and only
	 * then transition to Active state on this struct se_device.
	 */
	if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
		spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
		cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
		list_add_tail(&cmd->se_delayed_list,
				&SE_DEV(cmd)->delayed_cmd_list);
		spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);

		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
			" delayed CMD list, se_ordered_id: %u\n",
			T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
			cmd->se_ordered_id);
		/*
		 * Return zero to let transport_execute_tasks() know
		 * not to add the delayed tasks to the execution list.
		 */
		return 0;
	}
	/*
	 * Otherwise, no ORDERED task attributes exist.
	 */
	return 1;
}

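/*
 * Worked example of the ordering rules above: if an ORDERED command
 * arrives while SE_DEV(cmd)->simple_cmds is non-zero, it skips the
 * early return and falls through to the dev_ordered_sync check, which
 * queues it to the delayed list (return 0).  A SIMPLE command arriving
 * afterwards also sees dev_ordered_sync != 0 and is delayed behind it,
 * preserving the ORDERED barrier semantics.
 */
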
/*
 * Called from fabric module context in transport_generic_new_cmd() and
 * transport_generic_process_write()
 */
static int transport_execute_tasks(struct se_cmd *cmd)
{
	int add_tasks;

	if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
		if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
			cmd->transport_error_status =
				PYX_TRANSPORT_LU_COMM_FAILURE;
			transport_generic_request_failure(cmd, NULL, 0, 1);
			return 0;
		}
	}
	/*
	 * Call transport_cmd_check_stop() to see if a fabric exception
	 * has occurred that prevents execution.
	 */
	if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
		/*
		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
		 * attribute for the tasks of the received struct se_cmd CDB
		 */
		add_tasks = transport_execute_task_attr(cmd);
		if (add_tasks == 0)
			goto execute_tasks;
		/*
		 * This calls transport_add_tasks_from_cmd() to handle
		 * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
		 * (if enabled) in __transport_add_task_to_execute_queue() and
		 * transport_add_task_check_sam_attr().
		 */
		transport_add_tasks_from_cmd(cmd);
	}
	/*
	 * Kick the execution queue for the cmd associated struct se_device
	 * storage object.
	 */
execute_tasks:
	__transport_execute_tasks(SE_DEV(cmd));
	return 0;
}

/*
 * Called to check the struct se_device tcq depth window, and once open,
 * pull a struct se_task from struct se_device->execute_task_list and
 * dispatch it to the backend subsystem plugin.
 *
 * Called from transport_processing_thread()
 */
static int __transport_execute_tasks(struct se_device *dev)
{
	int error;
	struct se_cmd *cmd = NULL;
	struct se_task *task;
	unsigned long flags;

	/*
	 * Check if there is enough room in the device and HBA queue to send
	 * struct se_transport_task's to the selected transport.
	 */
check_depth:
	spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
	if (!(atomic_read(&dev->depth_left)) ||
	    !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
		return transport_tcq_window_closed(dev);
	}
	dev->dev_tcq_window_closed = 0;

	spin_lock(&dev->execute_task_lock);
	task = transport_get_task_from_execute_queue(dev);
	spin_unlock(&dev->execute_task_lock);

	if (!task) {
		spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
		return 0;
	}

	atomic_dec(&dev->depth_left);
	atomic_dec(&SE_HBA(dev)->left_queue_depth);
	spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);

	cmd = TASK_CMD(task);

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&task->task_active, 1);
	atomic_set(&task->task_sent, 1);
	atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);

	if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
	    T_TASK(cmd)->t_task_cdbs)
		atomic_set(&cmd->transport_sent, 1);

	transport_start_task_timer(task);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * The struct se_cmd->transport_emulate_cdb() function pointer is used
	 * to grab REPORT_LUNS CDBs before they hit the
	 * struct se_subsystem_api->do_task() caller below.
	 */
	if (cmd->transport_emulate_cdb) {
		error = cmd->transport_emulate_cdb(cmd);
		if (error != 0) {
			cmd->transport_error_status = error;
			atomic_set(&task->task_active, 0);
			atomic_set(&cmd->transport_sent, 0);
			transport_stop_tasks_for_cmd(cmd);
			transport_generic_request_failure(cmd, dev, 0, 1);
			goto check_depth;
		}
		/*
		 * Handle the successful completion for transport_emulate_cdb()
		 * for synchronous operation, following SCF_EMULATE_CDB_ASYNC.
		 * Otherwise the caller is expected to complete the task with
		 * proper status.
		 */
		if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
			cmd->scsi_status = SAM_STAT_GOOD;
			task->task_scsi_status = GOOD;
			transport_complete_task(task, 1);
		}
	} else {
		/*
		 * Currently for all virtual TCM plugins including IBLOCK, FILEIO and
		 * RAMDISK we use the internal transport_emulate_control_cdb() logic
		 * with struct se_subsystem_api callers for the primary SPC-3 TYPE_DISK
		 * LUN emulation code.
		 *
		 * For TCM/pSCSI and all other SCF_SCSI_DATA_SG_IO_CDB I/O tasks we
		 * call ->do_task() directly and let the underlying TCM subsystem plugin
		 * code handle the CDB emulation.
		 */
		if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
		    (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
			error = transport_emulate_control_cdb(task);
		else
			error = TRANSPORT(dev)->do_task(task);

		if (error != 0) {
			cmd->transport_error_status = error;
			atomic_set(&task->task_active, 0);
			atomic_set(&cmd->transport_sent, 0);
			transport_stop_tasks_for_cmd(cmd);
			transport_generic_request_failure(cmd, dev, 0, 1);
		}
	}

	goto check_depth;

	return 0;
}

void transport_new_cmd_failure(struct se_cmd *se_cmd)
{
	unsigned long flags;
	/*
	 * Any unsolicited data for the failed command will be dumped
	 * inside of the fabric plugin
	 */
	spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
	se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
	se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);

	CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
}

static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
static inline u32 transport_get_sectors_6(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 8-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
	/*
	 * Everything else assumes TYPE_DISK Sector CDB location.
	 * Use 8-bit sector value.  SBC-3 says:
	 *
	 *   A TRANSFER LENGTH field set to zero specifies that 256
	 *   logical blocks shall be written.  Any other value
	 *   specifies the number of logical blocks that shall be
	 *   written.
	 */
type_disk:
	return cdb[4] ? : 256;
}
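
/*
 * Worked example for the SBC-3 rule above (illustrative only, not part of
 * the original source): a READ_6 with cdb[4] == 0x00 yields 256 logical
 * blocks, while cdb[4] == 0x08 yields 8 logical blocks.
 */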
static inline u32 transport_get_sectors_10(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 16-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * XXX_10 is not defined in SSC, throw an exception
	 */
	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
		*ret = -1;
		return 0;
	}
	/*
	 * Everything else assumes TYPE_DISK Sector CDB location.
	 * Use 16-bit sector value.
	 */
type_disk:
	return (u32)(cdb[7] << 8) + cdb[8];
}
static inline u32 transport_get_sectors_12(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * XXX_12 is not defined in SSC, throw an exception
	 */
	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
		*ret = -1;
		return 0;
	}
	/*
	 * Everything else assumes TYPE_DISK Sector CDB location.
	 * Use 32-bit sector value.
	 */
type_disk:
	return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
}
static inline u32 transport_get_sectors_16(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	struct se_device *dev = SE_LUN(cmd)->lun_se_dev;

	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	if (!dev)
		goto type_disk;
	/*
	 * Use 24-bit allocation length for TYPE_TAPE.
	 */
	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
		return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];

type_disk:
	return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
		    (cdb[12] << 8) + cdb[13];
}
/*
 * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
 */
static inline u32 transport_get_sectors_32(
	unsigned char *cdb,
	struct se_cmd *cmd,
	int *ret)
{
	/*
	 * Assume TYPE_DISK for non struct se_device objects.
	 * Use 32-bit sector value.
	 */
	return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
		    (cdb[30] << 8) + cdb[31];
}
static inline u32 transport_get_size(
	u32 sectors,
	unsigned char *cdb,
	struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);

	if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
		if (cdb[1] & 1) { /* sectors */
			return DEV_ATTRIB(dev)->block_size * sectors;
		} else /* bytes */
			return sectors;
	}
#if 0
	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
		" %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
		DEV_ATTRIB(dev)->block_size * sectors,
		TRANSPORT(dev)->name);
#endif
	return DEV_ATTRIB(dev)->block_size * sectors;
}
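
/*
 * Example (illustrative only, assuming a TYPE_DISK backend with a 512 byte
 * block_size): a 10 sector READ_10 yields transport_get_size() == 5120
 * bytes.  For TYPE_TAPE, the FIXED bit in cdb[1] selects between a block
 * count (FIXED=1, scaled by block_size) and a raw byte count (FIXED=0).
 */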
unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
{
	unsigned char result = 0;
	/*
	 * MSB
	 */
	if ((val[0] >= 'a') && (val[0] <= 'f'))
		result = ((val[0] - 'a' + 10) & 0xf) << 4;
	else if ((val[0] >= 'A') && (val[0] <= 'F'))
		result = ((val[0] - 'A' + 10) & 0xf) << 4;
	else /* digit */
		result = ((val[0] - '0') & 0xf) << 4;
	/*
	 * LSB
	 */
	if ((val[1] >= 'a') && (val[1] <= 'f'))
		result |= ((val[1] - 'a' + 10) & 0xf);
	else if ((val[1] >= 'A') && (val[1] <= 'F'))
		result |= ((val[1] - 'A' + 10) & 0xf);
	else /* digit */
		result |= ((val[1] - '0') & 0xf);

	return result;
}
EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
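
/*
 * Usage sketch (illustrative only; the function below is hypothetical and
 * not part of the original driver): convert the ASCII hex pair "3f" into
 * the single binary byte 0x3f, as a fabric module might when parsing
 * hex-formatted WWN strings.
 */
#if 0
static void example_asciihex_usage(void)
{
	unsigned char hex[2] = { '3', 'f' };
	unsigned char bin = transport_asciihex_to_binaryhex(hex);
	/* bin == 0x3f */
}
#endif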
static void transport_xor_callback(struct se_cmd *cmd)
{
	unsigned char *buf, *addr;
	struct se_mem *se_mem;
	unsigned int offset;
	int i;
	/*
	 * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
	 *
	 * 1) read the specified logical block(s);
	 * 2) transfer logical blocks from the data-out buffer;
	 * 3) XOR the logical blocks transferred from the data-out buffer with
	 *    the logical blocks read, storing the resulting XOR data in a buffer;
	 * 4) if the DISABLE WRITE bit is set to zero, then write the logical
	 *    blocks transferred from the data-out buffer; and
	 * 5) transfer the resulting XOR data to the data-in buffer.
	 */
	buf = kmalloc(cmd->data_length, GFP_KERNEL);
	if (!(buf)) {
		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
		return;
	}
	/*
	 * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
	 * into the locally allocated *buf
	 */
	transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
	/*
	 * Now perform the XOR against the BIDI read memory located at
	 * T_TASK(cmd)->t_mem_bidi_list
	 */
	offset = 0;
	list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
		addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
		if (!(addr))
			goto out;

		for (i = 0; i < se_mem->se_len; i++)
			*(addr + se_mem->se_off + i) ^= *(buf + offset + i);

		offset += se_mem->se_len;
		kunmap_atomic(addr, KM_USER0);
	}
out:
	kfree(buf);
}
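
/*
 * Illustrative XDWRITEREAD example (a sketch, not from the original source):
 * if a data-out (WRITE) payload byte is 0xF0 and the corresponding byte read
 * from media holds 0x0F, the loop above leaves 0xF0 ^ 0x0F == 0xFF in the
 * BIDI data-in buffer while the new data is written to media.
 */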
/*
 * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
 */
static int transport_get_sense_data(struct se_cmd *cmd)
{
	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL;
	struct se_device *dev;
	struct se_task *task = NULL, *task_tmp;
	unsigned long flags;
	u32 offset = 0;

	if (!SE_LUN(cmd)) {
		printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
		return -1;
	}
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return 0;
	}

	list_for_each_entry_safe(task, task_tmp,
				&T_TASK(cmd)->t_task_list, t_list) {

		if (!task->task_sense)
			continue;

		dev = task->se_dev;
		if (!(dev))
			continue;

		if (!TRANSPORT(dev)->get_sense_buffer) {
			printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
					" is NULL\n");
			continue;
		}

		sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
		if (!(sense_buffer)) {
			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
				" sense buffer for task with sense\n",
				CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
			continue;
		}
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

		offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);

		memcpy((void *)&buffer[offset], (void *)sense_buffer,
				TRANSPORT_SENSE_BUFFER);
		cmd->scsi_status = task->task_scsi_status;
		/* Automatically padded */
		cmd->scsi_sense_length =
				(TRANSPORT_SENSE_BUFFER + offset);

		printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
				" and sense\n",
			dev->se_hba->hba_id, TRANSPORT(dev)->name,
			cmd->scsi_status);
		return 0;
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return -1;
}
static int transport_allocate_resources(struct se_cmd *cmd)
{
	u32 length = cmd->data_length;

	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
		return transport_generic_get_mem(cmd, length, PAGE_SIZE);
	else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
		return transport_generic_allocate_buf(cmd, length);
	else
		return 0;
}
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
	/*
	 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
	 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
	 * CONFLICT STATUS.
	 *
	 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
	 */
	if (SE_SESS(cmd) &&
	    DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
		core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
			cmd->orig_fe_lun, 0x2C,
			ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
	return -2;
}
/* transport_generic_cmd_sequencer():
 *
 * Generic Command Sequencer that should work for most DAS transport
 * drivers.
 *
 * Called from transport_generic_allocate_tasks() in the $FABRIC_MOD
 * RX Thread.
 *
 * FIXME: Need to support other SCSI OPCODES as well.
 */
static int transport_generic_cmd_sequencer(
	struct se_cmd *cmd,
	unsigned char *cdb)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	int ret = 0, sector_ret = 0, passthrough;
	u32 sectors = 0, size = 0, pr_reg_type = 0;
	u16 service_action;
	u8 alua_ascq = 0;
	/*
	 * Check for an existing UNIT ATTENTION condition
	 */
	if (core_scsi3_ua_check(cmd, cdb) < 0) {
		cmd->transport_wait_for_tasks =
				&transport_nop_wait_for_tasks;
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
		return -2;
	}
	/*
	 * Check status of Asymmetric Logical Unit Assignment port
	 */
	ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
	if (ret != 0) {
		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
		/*
		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
		 * the ALUA additional sense code qualifier (ASCQ) is determined
		 * by the ALUA primary or secondary access state.
		 */
		if (ret > 0) {
#if 0
			printk(KERN_INFO "[%s]: ALUA TG Port not available,"
				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
				CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
#endif
			transport_set_sense_codes(cmd, 0x04, alua_ascq);
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
			return -2;
		}
		goto out_invalid_cdb_field;
	}
	/*
	 * Check status for SPC-3 Persistent Reservations
	 */
	if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
		if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
					cmd, cdb, pr_reg_type) != 0)
			return transport_handle_reservation_conflict(cmd);
		/*
		 * This means the CDB is allowed for the SCSI Initiator port
		 * when said port is *NOT* holding the legacy SPC-2 or
		 * SPC-3 Persistent Reservation.
		 */
	}
	switch (cdb[0]) {
	case READ_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_6;
		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_12;
		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case READ_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_16;
		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_6:
		sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_6;
		T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_10:
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_12:
		sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_12;
		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case WRITE_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_16;
		T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		break;
	case XDWRITEREAD_10:
		if ((cmd->data_direction != DMA_TO_DEVICE) ||
		    !(T_TASK(cmd)->t_tasks_bidi))
			goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		cmd->transport_split_cdb = &split_cdb_XX_10;
		T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
		passthrough = (TRANSPORT(dev)->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);
		/*
		 * Skip the remaining assignments for TCM/PSCSI passthrough
		 */
		if (passthrough)
			break;
		/*
		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok()
		 */
		cmd->transport_complete_callback = &transport_xor_callback;
		T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
		break;
	case VARIABLE_LENGTH_CMD:
		service_action = get_unaligned_be16(&cdb[8]);
		/*
		 * Determine if this is a TCM/PSCSI device and we should disable
		 * internal emulation for this CDB.
		 */
		passthrough = (TRANSPORT(dev)->transport_type ==
					TRANSPORT_PLUGIN_PHBA_PDEV);

		switch (service_action) {
		case XDWRITEREAD_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			/*
			 * Use WRITE_32 and READ_32 opcodes for the emulated
			 * XDWRITE_READ_32 logic.
			 */
			cmd->transport_split_cdb = &split_cdb_XX_32;
			T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
			/*
			 * Skip the remaining assignments for TCM/PSCSI passthrough
			 */
			if (passthrough)
				break;
			/*
			 * Setup BIDI XOR callback to be run during
			 * transport_generic_complete_ok()
			 */
			cmd->transport_complete_callback = &transport_xor_callback;
			T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
			break;
		case WRITE_SAME_32:
			sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
			if (sector_ret)
				goto out_unsupported_cdb;
			size = transport_get_size(sectors, cdb, cmd);
			T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
			/*
			 * Skip the remaining assignments for TCM/PSCSI passthrough
			 */
			if (passthrough)
				break;

			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
					" bits not supported for Block Discard"
					" Emulation\n");
				goto out_invalid_cdb_field;
			}
			/*
			 * Currently for the emulated case we only accept
			 * WRITE_SAME with the UNMAP=1 bit set.
			 */
			if (!(cdb[10] & 0x08)) {
				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
					" supported for Block Discard Emulation\n");
				goto out_invalid_cdb_field;
			}
			break;
		default:
			printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
				" 0x%04x not supported\n", service_action);
			goto out_unsupported_cdb;
		}
		break;
	case 0xa3:
		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_IN from SCC-2 */
			/*
			 * Check for emulated MI_REPORT_TARGET_PGS.
			 */
			if (cdb[1] == MI_REPORT_TARGET_PGS) {
				cmd->transport_emulate_cdb =
				(T10_ALUA(su_dev)->alua_type ==
				 SPC3_ALUA_EMULATED) ?
				&core_emulate_report_target_port_groups :
				NULL;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_SEND_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case MODE_SELECT:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SELECT_10:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case MODE_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case MODE_SENSE_10:
	case GPCMD_READ_BUFFER_CAPACITY:
	case GPCMD_SEND_OPC:
	case LOG_SELECT:
	case LOG_SENSE:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case READ_BLOCK_LIMITS:
		size = READ_BLOCK_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case GPCMD_GET_CONFIGURATION:
	case GPCMD_READ_FORMAT_CAPACITIES:
	case GPCMD_READ_DISC_INFO:
	case GPCMD_READ_TRACK_RZONE_INFO:
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case PERSISTENT_RESERVE_IN:
	case PERSISTENT_RESERVE_OUT:
		cmd->transport_emulate_cdb =
			(T10_RES(su_dev)->res_type ==
			 SPC3_PERSISTENT_RESERVATIONS) ?
			&core_scsi3_emulate_pr : NULL;
		size = (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case GPCMD_MECHANISM_STATUS:
	case GPCMD_READ_DVD_STRUCTURE:
		size = (cdb[8] << 8) + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case READ_POSITION:
		size = READ_POSITION_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case 0xa4:
		if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
			/* MAINTENANCE_OUT from SCC-2
			 *
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->transport_emulate_cdb =
				(T10_ALUA(su_dev)->alua_type ==
					SPC3_ALUA_EMULATED) ?
				&core_emulate_set_target_port_groups :
				NULL;
			}
			size = (cdb[6] << 24) | (cdb[7] << 16) |
			       (cdb[8] << 8) | cdb[9];
		} else {
			/* GPCMD_REPORT_KEY from multi media commands */
			size = (cdb[8] << 8) + cdb[9];
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case INQUIRY:
		size = (cdb[3] << 8) + cdb[4];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case READ_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case READ_CAPACITY:
		size = READ_CAP_LEN;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case READ_MEDIA_SERIAL_NUMBER:
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case SERVICE_ACTION_IN:
	case ACCESS_CONTROL_IN:
	case ACCESS_CONTROL_OUT:
	case EXTENDED_COPY:
	case READ_ATTRIBUTE:
	case RECEIVE_COPY_RESULTS:
	case WRITE_ATTRIBUTE:
		size = (cdb[10] << 24) | (cdb[11] << 16) |
		       (cdb[12] << 8) | cdb[13];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		size = (cdb[3] << 8) | cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
	case GPCMD_READ_CD:
		sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		size = (2336 * sectors);
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
#endif
	case READ_TOC:
		size = cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case REQUEST_SENSE:
		size = cdb[4];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case READ_ELEMENT_STATUS:
		size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case WRITE_BUFFER:
		size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;
		/*
		 * Setup the legacy emulated handler for SPC-2 and
		 * >= SPC-3 compatible reservation handling (CRH=1).
		 * Otherwise, we assume the underlying SCSI logic is
		 * running in SPC_PASSTHROUGH, and wants reservations
		 * emulation disabled.
		 */
		cmd->transport_emulate_cdb =
				(T10_RES(su_dev)->res_type !=
				 SPC_PASSTHROUGH) ?
				&core_scsi2_emulate_crh : NULL;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case RELEASE:
	case RELEASE_10:
		/*
		 * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RELEASE_10)
			size = (cdb[7] << 8) | cdb[8];
		else
			size = cmd->data_length;

		cmd->transport_emulate_cdb =
				(T10_RES(su_dev)->res_type !=
				 SPC_PASSTHROUGH) ?
				&core_scsi2_emulate_crh : NULL;
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case SYNCHRONIZE_CACHE:
	case 0x91: /* SYNCHRONIZE_CACHE_16: */
		/*
		 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
		 */
		if (cdb[0] == SYNCHRONIZE_CACHE) {
			sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
			T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
		} else {
			sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
			T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
		}
		if (sector_ret)
			goto out_unsupported_cdb;

		size = transport_get_size(sectors, cdb, cmd);
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		/*
		 * For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
		 */
		if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
			break;
		/*
		 * Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
		 * for the SYNCHRONIZE_CACHE* Immed=1 case in __transport_execute_tasks()
		 */
		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
		/*
		 * Check to ensure that LBA + Range does not exceed the end
		 * of the device.
		 */
		if (transport_get_sectors(cmd) < 0)
			goto out_invalid_cdb_field;
		break;
	case UNMAP:
		size = get_unaligned_be16(&cdb[7]);
		passthrough = (TRANSPORT(dev)->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);
		/*
		 * Determine if the received UNMAP is used for direct passthrough
		 * into Linux/SCSI with struct request via TCM/pSCSI, or if we are
		 * signaling the use of internal transport_generic_unmap() emulation
		 * for UNMAP -> Linux/BLOCK discard with TCM/IBLOCK and TCM/FILEIO
		 * subsystem plugin backstores.
		 */
		if (!(passthrough))
			cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;

		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	case WRITE_SAME_16:
		sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
		if (sector_ret)
			goto out_unsupported_cdb;
		size = transport_get_size(sectors, cdb, cmd);
		/* The WRITE_SAME_16 LBA is a 64-bit field at bytes 2-9 */
		T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[2]);
		passthrough = (TRANSPORT(dev)->transport_type ==
				TRANSPORT_PLUGIN_PHBA_PDEV);
		/*
		 * Determine if the received WRITE_SAME_16 is used for direct
		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI,
		 * or if we are signaling the use of internal WRITE_SAME + UNMAP=1
		 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
		 * TCM/FILEIO subsystem plugin backstores.
		 */
		if (!(passthrough)) {
			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
					" bits not supported for Block Discard"
					" Emulation\n");
				goto out_invalid_cdb_field;
			}
			/*
			 * Currently for the emulated case we only accept
			 * WRITE_SAME with the UNMAP=1 bit set.
			 */
			if (!(cdb[1] & 0x08)) {
				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
					" supported for Block Discard Emulation\n");
				goto out_invalid_cdb_field;
			}
		}
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
		break;
	case ALLOW_MEDIUM_REMOVAL:
	case GPCMD_CLOSE_TRACK:
	case ERASE:
	case INITIALIZE_ELEMENT_STATUS:
	case GPCMD_LOAD_UNLOAD:
	case REZERO_UNIT:
	case SEEK_10:
	case GPCMD_SET_SPEED:
	case SPACE:
	case START_STOP:
	case TEST_UNIT_READY:
	case VERIFY:
	case WRITE_FILEMARKS:
	case MOVE_MEDIUM:
		cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
		break;
	case REPORT_LUNS:
		cmd->transport_emulate_cdb =
				&transport_core_report_lun_response;
		size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
			cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
		break;
	default:
		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
		goto out_unsupported_cdb;
	}
	if (size != cmd->data_length) {
		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
				cmd->data_length, size, cdb[0]);

		cmd->cmd_spdtl = size;

		if (cmd->data_direction == DMA_TO_DEVICE) {
			printk(KERN_ERR "Rejecting underflow/overflow"
					" WRITE data\n");
			goto out_invalid_cdb_field;
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_SG_IO_CDB.
		 */
		if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
			printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
				" CDB on non 512-byte sector setup subsystem"
				" plugin: %s\n", TRANSPORT(dev)->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			goto out_invalid_cdb_field;
		}

		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
		}
		cmd->data_length = size;
	}

	transport_set_supported_SAM_opcode(cmd);
	return ret;

out_unsupported_cdb:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	return -2;
out_invalid_cdb_field:
	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
	cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
	return -2;
}
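
/*
 * Residual handling example for the size != cmd->data_length case above
 * (illustrative only): if an initiator sends a READ with an 8192 byte
 * expected transfer length but the CDB TRANSFER LENGTH only covers 4096
 * bytes, the sequencer sets SCF_UNDERFLOW_BIT, cmd->residual_count = 4096,
 * and shrinks cmd->data_length to the 4096 bytes that will actually move.
 */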
static inline void transport_release_tasks(struct se_cmd *);

/*
 * This function will copy a contiguous *src buffer into a destination
 * struct scatterlist array.
 */
static void transport_memcpy_write_contig(
	struct se_cmd *cmd,
	struct scatterlist *sg_d,
	unsigned char *src)
{
	u32 i = 0, length = 0, total_length = cmd->data_length;
	void *dst;

	while (total_length) {
		length = sg_d[i].length;

		if (length > total_length)
			length = total_length;

		dst = sg_virt(&sg_d[i]);

		memcpy(dst, src, length);

		if (!(total_length -= length))
			return;

		src += length;
		i++;
	}
}

/*
 * This function will copy a struct scatterlist array *sg_s into a destination
 * contiguous *dst buffer.
 */
static void transport_memcpy_read_contig(
	struct se_cmd *cmd,
	unsigned char *dst,
	struct scatterlist *sg_s)
{
	u32 i = 0, length = 0, total_length = cmd->data_length;
	void *src;

	while (total_length) {
		length = sg_s[i].length;

		if (length > total_length)
			length = total_length;

		src = sg_virt(&sg_s[i]);

		memcpy(dst, src, length);

		if (!(total_length -= length))
			return;

		dst += length;
		i++;
	}
}

static void transport_memcpy_se_mem_read_contig(
	struct se_cmd *cmd,
	unsigned char *dst,
	struct list_head *se_mem_list)
{
	struct se_mem *se_mem;
	void *src;
	u32 length = 0, total_length = cmd->data_length;

	list_for_each_entry(se_mem, se_mem_list, se_list) {
		length = se_mem->se_len;

		if (length > total_length)
			length = total_length;

		src = page_address(se_mem->se_page) + se_mem->se_off;

		memcpy(dst, src, length);

		if (!(total_length -= length))
			return;

		dst += length;
	}
}
/*
 * Called from transport_generic_complete_ok() and
 * transport_generic_request_failure() to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	struct se_cmd *cmd_p, *cmd_tmp;
	int new_active_tasks = 0;

	if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
		atomic_dec(&dev->simple_cmds);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
		atomic_dec(&dev->dev_hoq_count);
		smp_mb__after_atomic_dec();
		dev->dev_cur_ordered_id++;
		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
			cmd->se_ordered_id);
	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
		spin_lock(&dev->ordered_cmd_lock);
		list_del(&cmd->se_ordered_list);
		atomic_dec(&dev->dev_ordered_sync);
		smp_mb__after_atomic_dec();
		spin_unlock(&dev->ordered_cmd_lock);

		dev->dev_cur_ordered_id++;
		DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
	}
	/*
	 * Process all commands up to the last received
	 * ORDERED task attribute which requires another blocking
	 * boundary
	 */
	spin_lock(&dev->delayed_cmd_lock);
	list_for_each_entry_safe(cmd_p, cmd_tmp,
			&dev->delayed_cmd_list, se_delayed_list) {
		list_del(&cmd_p->se_delayed_list);
		spin_unlock(&dev->delayed_cmd_lock);

		DEBUG_STA("Calling add_tasks() for"
			" cmd_p: 0x%02x Task Attr: 0x%02x"
			" Dormant -> Active, se_ordered_id: %u\n",
			T_TASK(cmd_p)->t_task_cdb[0],
			cmd_p->sam_task_attr, cmd_p->se_ordered_id);

		transport_add_tasks_from_cmd(cmd_p);
		new_active_tasks++;

		spin_lock(&dev->delayed_cmd_lock);
		if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
			break;
	}
	spin_unlock(&dev->delayed_cmd_lock);
	/*
	 * If new tasks have become active, wake up the transport thread
	 * to do the processing of the Active tasks.
	 */
	if (new_active_tasks != 0)
		wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
}
static void transport_generic_complete_ok(struct se_cmd *cmd)
{
	int reason = 0;
	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
		transport_complete_task_attr(cmd);
	/*
	 * Check if we need to retrieve a sense buffer from
	 * the struct se_cmd in question.
	 */
	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		if (transport_get_sense_data(cmd) < 0)
			reason = TCM_NON_EXISTENT_LUN;
		/*
		 * Only set when a struct se_task->task_scsi_status returned
		 * a non GOOD status.
		 */
		if (cmd->scsi_status) {
			transport_send_check_condition_and_sense(
					cmd, reason, 1);
			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}
	/*
	 * Check for a callback, used by amongst other things
	 * XDWRITE_READ_10 emulation.
	 */
	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd);

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (SE_LUN(cmd)->lun_sep) {
			SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * If enabled by TCM fabric module pre-registered SGL
		 * memory, perform the memcpy() from the TCM internal
		 * contiguous buffer back to the original SGL.
		 */
		if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
			transport_memcpy_write_contig(cmd,
				 T_TASK(cmd)->t_task_pt_sgl,
				 T_TASK(cmd)->t_task_buf);

		CMD_TFO(cmd)->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		spin_lock(&cmd->se_lun->lun_sep_lock);
		if (SE_LUN(cmd)->lun_sep) {
			SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
				cmd->data_length;
		}
		spin_unlock(&cmd->se_lun->lun_sep_lock);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
			spin_lock(&cmd->se_lun->lun_sep_lock);
			if (SE_LUN(cmd)->lun_sep) {
				SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
					cmd->data_length;
			}
			spin_unlock(&cmd->se_lun->lun_sep_lock);
			CMD_TFO(cmd)->queue_data_in(cmd);
			break;
		}
		/* Fall through for DMA_TO_DEVICE */
	case DMA_NONE:
		CMD_TFO(cmd)->queue_status(cmd);
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}
static void transport_free_dev_tasks(struct se_cmd *cmd)
{
	struct se_task *task, *task_tmp;
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	list_for_each_entry_safe(task, task_tmp,
				&T_TASK(cmd)->t_task_list, t_list) {
		if (atomic_read(&task->task_active))
			continue;

		kfree(task->task_sg_bidi);
		kfree(task->task_sg);

		list_del(&task->t_list);

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		if (task->se_dev)
			TRANSPORT(task->se_dev)->free_task(task);
		else
			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
				task->task_no);
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
}
static inline void transport_free_pages(struct se_cmd *cmd)
{
	struct se_mem *se_mem, *se_mem_tmp;
	int free_page = 1;

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
		free_page = 0;
	if (cmd->se_dev->transport->do_se_mem_map)
		free_page = 0;

	if (T_TASK(cmd)->t_task_buf) {
		kfree(T_TASK(cmd)->t_task_buf);
		T_TASK(cmd)->t_task_buf = NULL;
		return;
	}
	/*
	 * Caller will handle releasing of struct se_mem.
	 */
	if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
		return;

	if (!(T_TASK(cmd)->t_tasks_se_num))
		return;

	list_for_each_entry_safe(se_mem, se_mem_tmp,
			T_TASK(cmd)->t_mem_list, se_list) {
		/*
		 * We only call __free_page(struct se_mem->se_page) when
		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
		 */
		if (free_page)
			__free_page(se_mem->se_page);

		list_del(&se_mem->se_list);
		kmem_cache_free(se_mem_cache, se_mem);
	}

	if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
		list_for_each_entry_safe(se_mem, se_mem_tmp,
				T_TASK(cmd)->t_mem_bidi_list, se_list) {
			/*
			 * We only call __free_page(struct se_mem->se_page) when
			 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use.
			 */
			if (free_page)
				__free_page(se_mem->se_page);

			list_del(&se_mem->se_list);
			kmem_cache_free(se_mem_cache, se_mem);
		}
	}

	kfree(T_TASK(cmd)->t_mem_bidi_list);
	T_TASK(cmd)->t_mem_bidi_list = NULL;
	kfree(T_TASK(cmd)->t_mem_list);
	T_TASK(cmd)->t_mem_list = NULL;
	T_TASK(cmd)->t_tasks_se_num = 0;
}
static inline void transport_release_tasks(struct se_cmd *cmd)
{
	transport_free_dev_tasks(cmd);
}

static inline int transport_dec_and_check(struct se_cmd *cmd)
{
	unsigned long flags;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);
			return 1;
		}
	}

	if (atomic_read(&T_TASK(cmd)->t_se_count)) {
		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);
			return 1;
		}
	}
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	return 0;
}
static void transport_release_fe_cmd(struct se_cmd *cmd)
{
	unsigned long flags;

	if (transport_dec_and_check(cmd))
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto free_pages;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_release_tasks(cmd);
free_pages:
	transport_free_pages(cmd);
	transport_free_se_cmd(cmd);
	CMD_TFO(cmd)->release_cmd_direct(cmd);
}
static int transport_generic_remove(
	struct se_cmd *cmd,
	int release_to_pool,
	int session_reinstatement)
{
	unsigned long flags;

	if (!(T_TASK(cmd)))
		goto release_cmd;

	if (transport_dec_and_check(cmd)) {
		if (session_reinstatement) {
			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			transport_all_task_dev_remove_state(cmd);
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					flags);
		}
		return 1;
	}

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		goto free_pages;
	}
	atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
	transport_all_task_dev_remove_state(cmd);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	transport_release_tasks(cmd);
free_pages:
	transport_free_pages(cmd);

release_cmd:
	if (release_to_pool) {
		transport_release_cmd_to_pool(cmd);
	} else {
		transport_free_se_cmd(cmd);
		CMD_TFO(cmd)->release_cmd_direct(cmd);
	}

	return 0;
}
/*
 * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
 * @cmd:  Associated se_cmd descriptor
 * @mem:  SGL style memory for TCM WRITE / READ
 * @sg_mem_num: Number of SGL elements
 * @mem_bidi_in: SGL style memory for TCM BIDI READ
 * @sg_mem_bidi_num: Number of BIDI READ SGL elements
 *
 * Return: nonzero if cmd was rejected for -ENOMEM or improper usage
 * of parameters.
 */
int transport_generic_map_mem_to_cmd(
	struct se_cmd *cmd,
	struct scatterlist *mem,
	u32 sg_mem_num,
	struct scatterlist *mem_bidi_in,
	u32 sg_mem_bidi_num)
{
	u32 se_mem_cnt_out = 0;
	int ret;

	if (!(mem) || !(sg_mem_num))
		return 0;
	/*
	 * Passed *mem will contain a list_head containing preformatted
	 * struct se_mem elements...
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
		if ((mem_bidi_in) || (sg_mem_bidi_num)) {
			printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
				" with BIDI-COMMAND\n");
			return -ENOSYS;
		}

		T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
		T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
		cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
		return 0;
	}
	/*
	 * Otherwise, assume the caller is passing a struct scatterlist
	 * array from include/linux/scatterlist.h
	 */
	if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
	    (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
		/*
		 * For CDB using TCM struct se_mem linked list scatterlist memory
		 * processed into a TCM struct se_subsystem_dev, we do the mapping
		 * from the passed physical memory to struct se_mem->se_page here.
		 */
		T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
		if (!(T_TASK(cmd)->t_mem_list))
			return -ENOMEM;

		ret = transport_map_sg_to_mem(cmd,
			T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
		if (ret < 0)
			return -ENOMEM;

		T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
		/*
		 * Setup BIDI READ list of struct se_mem elements
		 */
		if ((mem_bidi_in) && (sg_mem_bidi_num)) {
			T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
			if (!(T_TASK(cmd)->t_mem_bidi_list)) {
				kfree(T_TASK(cmd)->t_mem_list);
				return -ENOMEM;
			}
			se_mem_cnt_out = 0;

			ret = transport_map_sg_to_mem(cmd,
				T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
				&se_mem_cnt_out);
			if (ret < 0) {
				kfree(T_TASK(cmd)->t_mem_list);
				return -ENOMEM;
			}

			T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
		}
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
		if (mem_bidi_in || sg_mem_bidi_num) {
			printk(KERN_ERR "BIDI-Commands not supported using "
				"SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
			return -ENOSYS;
		}
		/*
		 * For incoming CDBs using a contiguous buffer internally with TCM,
		 * save the passed struct scatterlist memory.  After TCM storage object
		 * processing has completed for this struct se_cmd, TCM core will call
		 * transport_memcpy_[write,read]_contig() as necessary from
		 * transport_generic_complete_ok() and transport_write_pending() in order
		 * to copy the TCM buffer to/from the original passed *mem in SGL ->
		 * struct scatterlist format.
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
		T_TASK(cmd)->t_task_pt_sgl = mem;
	}

	return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
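
/*
 * Fabric module usage sketch (illustrative only; the function name and
 * local variables below are hypothetical, not from a real fabric driver):
 * hand an already-built struct scatterlist payload to TCM core so it maps
 * the pages into struct se_mem elements instead of allocating its own.
 */
#if 0
static int example_fabric_map_sgl(struct se_cmd *se_cmd,
				  struct scatterlist *sgl, u32 sgl_count)
{
	return transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
						NULL, 0);
}
#endif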
static inline long long transport_dev_end_lba(struct se_device *dev)
{
	return dev->transport->get_blocks(dev) + 1;
}

static int transport_get_sectors(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);

	T_TASK(cmd)->t_tasks_sectors =
		(cmd->data_length / DEV_ATTRIB(dev)->block_size);
	if (!(T_TASK(cmd)->t_tasks_sectors))
		T_TASK(cmd)->t_tasks_sectors = 1;

	if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
		return 0;

	if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
	     transport_dev_end_lba(dev)) {
		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
			" transport_dev_end_lba(): %llu\n",
			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
			transport_dev_end_lba(dev));
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
		return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
	}

	return 0;
}
static int transport_new_cmd_obj(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	u32 task_cdbs = 0, rc;

	if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
		task_cdbs++;
		T_TASK(cmd)->t_task_cdbs++;
	} else {
		int set_counts = 1;

		/*
		 * Setup any BIDI READ tasks and memory from
		 * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
		 * are queued first for the non pSCSI passthrough case.
		 */
		if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
		    (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
			rc = transport_generic_get_cdb_count(cmd,
				T_TASK(cmd)->t_task_lba,
				T_TASK(cmd)->t_tasks_sectors,
				DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
				set_counts);
			if (!(rc)) {
				cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
				cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
				return PYX_TRANSPORT_LU_COMM_FAILURE;
			}
			set_counts = 0;
		}
		/*
		 * Setup the tasks and memory from T_TASK(cmd)->t_mem_list.
		 * Note for BIDI transfers this will contain the WRITE payload
		 */
		task_cdbs = transport_generic_get_cdb_count(cmd,
				T_TASK(cmd)->t_task_lba,
				T_TASK(cmd)->t_tasks_sectors,
				cmd->data_direction, T_TASK(cmd)->t_mem_list,
				set_counts);
		if (!(task_cdbs)) {
			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			cmd->scsi_sense_reason =
					TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			return PYX_TRANSPORT_LU_COMM_FAILURE;
		}
		T_TASK(cmd)->t_task_cdbs += task_cdbs;

#if 0
		printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
			" %u, t_task_cdbs: %u\n", cmd->data_length,
			T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
			T_TASK(cmd)->t_task_cdbs);
#endif
	}

	atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
	atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
	atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
	return 0;
}
static struct list_head *transport_init_se_mem_list(void)
{
	struct list_head *se_mem_list;

	se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!(se_mem_list)) {
		printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
		return NULL;
	}
	INIT_LIST_HEAD(se_mem_list);

	return se_mem_list;
}
static int
transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
{
	unsigned char *buf;
	struct se_mem *se_mem;

	T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
	if (!(T_TASK(cmd)->t_mem_list))
		return -ENOMEM;

	/*
	 * If the device uses memory mapping this is enough.
	 */
	if (cmd->se_dev->transport->do_se_mem_map)
		return 0;

	/*
	 * Setup BIDI-COMMAND READ list of struct se_mem elements
	 */
	if (T_TASK(cmd)->t_tasks_bidi) {
		T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
		if (!(T_TASK(cmd)->t_mem_bidi_list)) {
			kfree(T_TASK(cmd)->t_mem_list);
			return -ENOMEM;
		}
	}

	while (length) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			goto out;
		}

/* #warning FIXME Allocate contiguous pages for struct se_mem elements */
		se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
		if (!(se_mem->se_page)) {
			printk(KERN_ERR "alloc_pages() failed\n");
			goto out;
		}

		buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
		if (!(buf)) {
			printk(KERN_ERR "kmap_atomic() failed\n");
			goto out;
		}
		INIT_LIST_HEAD(&se_mem->se_list);
		se_mem->se_len = (length > dma_size) ? dma_size : length;
		memset(buf, 0, se_mem->se_len);
		kunmap_atomic(buf, KM_IRQ0);

		list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
		T_TASK(cmd)->t_tasks_se_num++;

		DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
			" Offset(%u)\n", se_mem->se_page, se_mem->se_len,
			se_mem->se_off);

		length -= se_mem->se_len;
	}

	DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
			T_TASK(cmd)->t_tasks_se_num);

	return 0;
out:
	if (se_mem) {
		if (se_mem->se_page)
			__free_pages(se_mem->se_page, 0);
		kmem_cache_free(se_mem_cache, se_mem);
	}
	return -1;
}
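
/*
 * Allocation example (illustrative only): for cmd->data_length == 10240 and
 * dma_size == PAGE_SIZE == 4096, the loop above builds three struct se_mem
 * elements of se_len 4096, 4096 and 2048, each backed by one zeroed page.
 */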
u32 transport_calc_sg_num(
	struct se_task *task,
	struct se_mem *in_se_mem,
	u32 task_offset)
{
	struct se_cmd *se_cmd = task->task_se_cmd;
	struct se_device *se_dev = SE_DEV(se_cmd);
	struct se_mem *se_mem = in_se_mem;
	struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
	u32 sg_length, task_size = task->task_size, task_sg_num_padded;

	while (task_size != 0) {
		DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
			" se_mem->se_off(%u) task_offset(%u)\n",
			se_mem->se_page, se_mem->se_len,
			se_mem->se_off, task_offset);

		if (task_offset == 0) {
			if (task_size >= se_mem->se_len) {
				sg_length = se_mem->se_len;

				if (!(list_is_last(&se_mem->se_list,
						T_TASK(se_cmd)->t_mem_list)))
					se_mem = list_entry(se_mem->se_list.next,
							struct se_mem, se_list);
			} else {
				sg_length = task_size;
				task_size -= sg_length;
				goto next;
			}

			DEBUG_SC("sg_length(%u) task_size(%u)\n",
					sg_length, task_size);
		} else {
			if ((se_mem->se_len - task_offset) > task_size) {
				sg_length = task_size;
				task_size -= sg_length;
				goto next;
			} else {
				sg_length = (se_mem->se_len - task_offset);

				if (!(list_is_last(&se_mem->se_list,
						T_TASK(se_cmd)->t_mem_list)))
					se_mem = list_entry(se_mem->se_list.next,
							struct se_mem, se_list);
			}

			DEBUG_SC("sg_length(%u) task_size(%u)\n",
					sg_length, task_size);

			task_offset = 0;
		}
		task_size -= sg_length;
next:
		DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
			task->task_no, task_size);

		task->task_sg_num++;
	}
	/*
	 * Check if the fabric module driver is requesting that all
	 * struct se_task->task_sg[] be chained together..  If so,
	 * then allocate an extra padding SG entry for linking and
	 * marking the end of the chained SGL.
	 */
	if (tfo->task_sg_chaining) {
		task_sg_num_padded = (task->task_sg_num + 1);
		task->task_padded_sg = 1;
	} else
		task_sg_num_padded = task->task_sg_num;

	task->task_sg = kzalloc(task_sg_num_padded *
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!(task->task_sg)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" task->task_sg\n");
		return 0;
	}
	sg_init_table(&task->task_sg[0], task_sg_num_padded);
	/*
	 * Setup task->task_sg_bidi for SCSI READ payload for
	 * TCM/pSCSI passthrough if present for BIDI-COMMAND
	 */
	if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
	    (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
		task->task_sg_bidi = kzalloc(task_sg_num_padded *
				sizeof(struct scatterlist), GFP_KERNEL);
		if (!(task->task_sg_bidi)) {
			printk(KERN_ERR "Unable to allocate memory for"
				" task->task_sg_bidi\n");
			return 0;
		}
		sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
	}
	/*
	 * For the chaining case, setup the proper end of SGL for the
	 * initial submission struct task into struct se_subsystem_api.
	 * This will be cleared later by transport_do_task_sg_chain()
	 */
	if (task->task_padded_sg) {
		sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
		/*
		 * Added the 'if' check before marking end of bi-directional
		 * scatterlist (which gets created only in case of request
		 * (RD + WR)).
		 */
		if (task->task_sg_bidi)
			sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
	}

	DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
		" task_sg_num_padded(%u)\n", task->task_sg_num,
		task_sg_num_padded);

	return task->task_sg_num;
}
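
/*
 * Clamp task->task_sectors for TYPE_DISK devices against both the end
 * of the device and DEV_ATTRIB(dev)->max_sectors, flagging
 * *max_sectors_set when the request had to be split.
 */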
static inline int transport_set_tasks_sectors_disk(
	struct se_task *task,
	struct se_device *dev,
	unsigned long long lba,
	u32 sectors,
	int *max_sectors_set)
{
	if ((lba + sectors) > transport_dev_end_lba(dev)) {
		task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);

		if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
			*max_sectors_set = 1;
		}
	} else {
		if (sectors > DEV_ATTRIB(dev)->max_sectors) {
			task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
			*max_sectors_set = 1;
		} else
			task->task_sectors = sectors;
	}

	return 0;
}
static inline int transport_set_tasks_sectors_non_disk(
	struct se_task *task,
	struct se_device *dev,
	unsigned long long lba,
	u32 sectors,
	int *max_sectors_set)
{
	if (sectors > DEV_ATTRIB(dev)->max_sectors) {
		task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
		*max_sectors_set = 1;
	} else
		task->task_sectors = sectors;

	return 0;
}

static inline int transport_set_tasks_sectors(
	struct se_task *task,
	struct se_device *dev,
	unsigned long long lba,
	u32 sectors,
	int *max_sectors_set)
{
	return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
		transport_set_tasks_sectors_disk(task, dev, lba, sectors,
				max_sectors_set) :
		transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
				max_sectors_set);
}
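
/* transport_map_sg_to_mem():
 *
 *	Build a struct se_mem list describing a fabric provided scatterlist,
 *	consuming cmd->data_length bytes from the passed in_mem SGL.
 */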
static int transport_map_sg_to_mem(
	struct se_cmd *cmd,
	struct list_head *se_mem_list,
	void *in_mem,
	u32 *se_mem_cnt)
{
	struct se_mem *se_mem;
	struct scatterlist *sg;
	u32 sg_count = 1, cmd_size = cmd->data_length;

	if (!in_mem) {
		printk(KERN_ERR "No source scatterlist\n");
		return -1;
	}
	sg = (struct scatterlist *)in_mem;

	while (cmd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);
		DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
			" sg_page: %p offset: %d length: %d\n", cmd_size,
			sg_page(sg), sg->offset, sg->length);

		se_mem->se_page = sg_page(sg);
		se_mem->se_off = sg->offset;

		if (cmd_size > sg->length) {
			se_mem->se_len = sg->length;
			sg = sg_next(sg);
			sg_count++;
		} else
			se_mem->se_len = cmd_size;

		cmd_size -= se_mem->se_len;

		DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
				*se_mem_cnt, cmd_size);
		DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
				se_mem->se_page, se_mem->se_off, se_mem->se_len);

		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;
	}

	DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
		" struct se_mem\n", sg_count, *se_mem_cnt);

	BUG_ON(sg_count != *se_mem_cnt);

	return 0;
}
/* transport_map_mem_to_sg():
 *
 *
 */
int transport_map_mem_to_sg(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct se_cmd *se_cmd = task->task_se_cmd;
	struct se_mem *se_mem = in_se_mem;
	struct scatterlist *sg = (struct scatterlist *)in_mem;
	u32 task_size = task->task_size, sg_no = 0;

	if (!sg) {
		printk(KERN_ERR "Unable to locate valid struct"
				" scatterlist pointer\n");
		return -1;
	}

	while (task_size != 0) {
		/*
		 * Setup the contiguous array of scatterlists for
		 * this struct se_task.
		 */
		sg_assign_page(sg, se_mem->se_page);

		if (*task_offset == 0) {
			sg->offset = se_mem->se_off;

			if (task_size >= se_mem->se_len) {
				sg->length = se_mem->se_len;

				if (!(list_is_last(&se_mem->se_list,
						T_TASK(se_cmd)->t_mem_list))) {
					se_mem = list_entry(se_mem->se_list.next,
							struct se_mem, se_list);
					(*se_mem_cnt)++;
				}
			} else {
				sg->length = task_size;
				/*
				 * Determine if we need to calculate an offset
				 * into the struct se_mem on the next go around..
				 */
				task_size -= sg->length;
				if (!(task_size))
					*task_offset = sg->length;

				goto next;
			}

		} else {
			sg->offset = (*task_offset + se_mem->se_off);

			if ((se_mem->se_len - *task_offset) > task_size) {
				sg->length = task_size;
				/*
				 * Determine if we need to calculate an offset
				 * into the struct se_mem on the next go around..
				 */
				task_size -= sg->length;
				if (!(task_size))
					*task_offset += sg->length;

				goto next;
			} else {
				sg->length = (se_mem->se_len - *task_offset);

				if (!(list_is_last(&se_mem->se_list,
						T_TASK(se_cmd)->t_mem_list))) {
					se_mem = list_entry(se_mem->se_list.next,
							struct se_mem, se_list);
					(*se_mem_cnt)++;
				}
			}

			*task_offset = 0;
		}
		task_size -= sg->length;
next:
		DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
			" task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
			sg_page(sg), sg->length, sg->offset, task_size, *task_offset);

		sg_no++;
		if (!(task_size))
			break;

		sg = sg_next(sg);

		BUG_ON(task_size > se_cmd->data_length);
	}
	*out_se_mem = se_mem;

	DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
		" SGs\n", task->task_no, *se_mem_cnt, sg_no);

	return 0;
}
/*
 * This function can be used by HW target mode drivers to create a linked
 * scatterlist from all contiguously allocated struct se_task->task_sg[].
 * This is intended to be called during the completion path by TCM Core
 * when struct target_core_fabric_ops->check_task_sg_chaining is enabled.
 */
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
	struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
	struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
	struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
	struct se_task *task;
	struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
	u32 task_sg_num = 0, sg_count = 0;
	int i;

	if (tfo->task_sg_chaining == 0) {
		printk(KERN_ERR "task_sg_chaining is disabled for fabric module:"
				" %s\n", tfo->get_fabric_name());
		dump_stack();
		return;
	}
	/*
	 * Walk the struct se_task list and setup scatterlist chains
	 * for each contiguously allocated struct se_task->task_sg[].
	 */
	list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
		if (!(task->task_sg) || !(task->task_padded_sg))
			continue;

		if (sg_head && sg_link) {
			sg_head_cur = &task->task_sg[0];
			sg_link_cur = &task->task_sg[task->task_sg_num];
			/*
			 * Either add chain or mark end of scatterlist
			 */
			if (!(list_is_last(&task->t_list,
					&T_TASK(cmd)->t_task_list))) {
				/*
				 * Clear existing SGL termination bit set in
				 * transport_calc_sg_num(), see sg_mark_end()
				 */
				sg_end_cur = &task->task_sg[task->task_sg_num - 1];
				sg_end_cur->page_link &= ~0x02;

				sg_chain(sg_head, task_sg_num, sg_head_cur);
				sg_count += task->task_sg_num;
				task_sg_num = (task->task_sg_num + 1);
			} else {
				sg_chain(sg_head, task_sg_num, sg_head_cur);
				sg_count += task->task_sg_num;
				task_sg_num = task->task_sg_num;
			}

			sg_head = sg_head_cur;
			sg_link = sg_link_cur;
			continue;
		}
		sg_head = sg_first = &task->task_sg[0];
		sg_link = &task->task_sg[task->task_sg_num];
		/*
		 * Check for single task..
		 */
		if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
			/*
			 * Clear existing SGL termination bit set in
			 * transport_calc_sg_num(), see sg_mark_end()
			 */
			sg_end = &task->task_sg[task->task_sg_num - 1];
			sg_end->page_link &= ~0x02;
			sg_count += task->task_sg_num;
			task_sg_num = (task->task_sg_num + 1);
		} else {
			sg_count += task->task_sg_num;
			task_sg_num = task->task_sg_num;
		}
	}
	/*
	 * Setup the starting pointer and total t_tasks_sg_linked_no including
	 * padding SGs for linking and to mark the end.
	 */
	T_TASK(cmd)->t_tasks_sg_chained = sg_first;
	T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;

	DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
		" t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
		T_TASK(cmd)->t_tasks_sg_chained_no);

	for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
			T_TASK(cmd)->t_tasks_sg_chained_no, i) {

		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
			i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
		if (sg_is_chain(sg))
			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
		if (sg_is_last(sg))
			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
	}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
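
/*
 * Dispatch the se_mem -> scatterlist mapping: hand off to the subsystem
 * plugin's do_se_mem_map() when it performs its own memory mapping,
 * otherwise fall back to transport_calc_sg_num() plus
 * transport_map_mem_to_sg() for preallocated scatterlists.
 */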
static int transport_do_se_mem_map(
	struct se_device *dev,
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	u32 task_offset = *task_offset_in;
	int ret = 0;
	/*
	 * se_subsystem_api_t->do_se_mem_map is used when internal allocation
	 * has been done by the transport plugin.
	 */
	if (TRANSPORT(dev)->do_se_mem_map) {
		ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
				in_mem, in_se_mem, out_se_mem, se_mem_cnt,
				task_offset_in);
		if (ret == 0)
			T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;

		return ret;
	}

	BUG_ON(list_empty(se_mem_list));
	/*
	 * This is the normal path for all normal non-BIDI and BIDI-COMMAND
	 * WRITE payloads..  If we need to do BIDI READ passthrough for
	 * TCM/pSCSI the first call to transport_do_se_mem_map ->
	 * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
	 * allocation for task->task_sg_bidi, and the subsequent call to
	 * transport_do_se_mem_map() from transport_generic_get_cdb_count()
	 * will do the mapping of the extra BIDI READ payload below.
	 */
	if (!(task->task_sg_bidi)) {
		/*
		 * Assume default that transport plugin speaks preallocated
		 * scatterlists.
		 */
		if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
			return -1;
		/*
		 * struct se_task->task_sg now contains the struct scatterlist array.
		 */
		return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
					in_se_mem, out_se_mem, se_mem_cnt,
					task_offset_in);
	}
	/*
	 * Handle the se_mem_list -> struct task->task_sg_bidi
	 * memory map for the extra BIDI READ payload
	 */
	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
				in_se_mem, out_se_mem, se_mem_cnt,
				task_offset_in);
}
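
/*
 * Break a data I/O down into per-task CDBs of at most max_sectors each,
 * allocating a struct se_task per chunk and mapping its payload from
 * mem_list.  Returns the number of tasks created, or zero on failure.
 */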
static u32 transport_generic_get_cdb_count(
	struct se_cmd *cmd,
	unsigned long long lba,
	u32 sectors,
	enum dma_data_direction data_direction,
	struct list_head *mem_list,
	int set_counts)
{
	unsigned char *cdb = NULL;
	struct se_task *task;
	struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
	struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
	struct se_device *dev = SE_DEV(cmd);
	int max_sectors_set = 0, ret;
	u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;

	if (!mem_list) {
		printk(KERN_ERR "mem_list is NULL in transport_generic_get"
				"_cdb_count()\n");
		return 0;
	}
	/*
	 * Using RAMDISK_DR backstores is the only case where mem_list
	 * will ever be empty at this point.
	 */
	if (!(list_empty(mem_list)))
		se_mem = list_entry(mem_list->next, struct se_mem, se_list);
	/*
	 * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
	 * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
	 */
	if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
	    !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
	    (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
		se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
					struct se_mem, se_list);

	while (sectors) {
		DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
			CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
			transport_dev_end_lba(dev));

		task = transport_generic_get_task(cmd, data_direction);
		if (!(task))
			goto out;

		transport_set_tasks_sectors(task, dev, lba, sectors,
				&max_sectors_set);

		task->task_lba = lba;
		lba += task->task_sectors;
		sectors -= task->task_sectors;
		task->task_size = (task->task_sectors *
				   DEV_ATTRIB(dev)->block_size);

		cdb = TRANSPORT(dev)->get_cdb(task);
		if (cdb) {
			memcpy(cdb, T_TASK(cmd)->t_task_cdb,
				scsi_command_size(T_TASK(cmd)->t_task_cdb));
			cmd->transport_split_cdb(task->task_lba,
					&task->task_sectors, cdb);
		}
		/*
		 * Perform the SE OBJ plugin and/or Transport plugin specific
		 * mapping for T_TASK(cmd)->t_mem_list, and setup task->task_sg
		 * and, if necessary, task->task_sg_bidi.
		 */
		ret = transport_do_se_mem_map(dev, task, mem_list,
				NULL, se_mem, &se_mem_lout, &se_mem_cnt,
				&task_offset_in);
		if (ret < 0)
			goto out;

		se_mem = se_mem_lout;
		/*
		 * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
		 *
		 * Note that the first call to transport_do_se_mem_map() above will
		 * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
		 * -> transport_calc_sg_num(), and the second here will do the
		 * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
		 */
		if (task->task_sg_bidi != NULL) {
			ret = transport_do_se_mem_map(dev, task,
				T_TASK(cmd)->t_mem_bidi_list, NULL,
				se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
				&task_offset_in);
			if (ret < 0)
				goto out;

			se_mem_bidi = se_mem_bidi_lout;
		}
		task_cdbs++;

		DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
				task_cdbs, task->task_sg_num);

		if (max_sectors_set) {
			max_sectors_set = 0;
			continue;
		}

		if (!sectors)
			break;
	}

	if (set_counts) {
		atomic_inc(&T_TASK(cmd)->t_fe_count);
		atomic_inc(&T_TASK(cmd)->t_se_count);
	}

	DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
		CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
		? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);

	return task_cdbs;
out:
	return 0;
}
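
/*
 * Map a control CDB (SG, non-SG, or non-data) to a single struct se_task
 * and invoke the matching subsystem plugin callback.
 */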
static int
transport_map_control_cmd_to_task(struct se_cmd *cmd)
{
	struct se_device *dev = SE_DEV(cmd);
	unsigned char *cdb;
	struct se_task *task;
	int ret;

	task = transport_generic_get_task(cmd, cmd->data_direction);
	if (!task)
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

	cdb = TRANSPORT(dev)->get_cdb(task);
	if (cdb)
		memcpy(cdb, cmd->t_task->t_task_cdb,
			scsi_command_size(cmd->t_task->t_task_cdb));

	task->task_size = cmd->data_length;
	task->task_sg_num =
		(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;

	atomic_inc(&cmd->t_task->t_fe_count);
	atomic_inc(&cmd->t_task->t_se_count);

	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
		u32 se_mem_cnt = 0, task_offset = 0;

		if (!list_empty(T_TASK(cmd)->t_mem_list))
			se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
					struct se_mem, se_list);

		ret = transport_do_se_mem_map(dev, task,
				cmd->t_task->t_mem_list, NULL, se_mem,
				&se_mem_lout, &se_mem_cnt, &task_offset);
		if (ret < 0)
			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;

		if (dev->transport->map_task_SG)
			return dev->transport->map_task_SG(task);
		return 0;
	} else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
		if (dev->transport->map_task_non_SG)
			return dev->transport->map_task_non_SG(task);
		return 0;
	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
		if (dev->transport->cdb_none)
			return dev->transport->cdb_none(task);
		return 0;
	} else {
		BUG();
		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
	}
}
/* transport_generic_new_cmd(): Called from transport_processing_thread()
 *
 *	Allocate storage transport resources from a set of values predefined
 *	by transport_generic_cmd_sequencer() from the iSCSI Target RX process.
 *	Any non-zero return here is treated as an "out of resource" op.
 */
/*
 * Generate struct se_task(s) and/or their payloads for this CDB.
 */
static int transport_generic_new_cmd(struct se_cmd *cmd)
{
	struct se_portal_group *se_tpg;
	struct se_task *task;
	struct se_device *dev = SE_DEV(cmd);
	int ret = 0;

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * to setup beforehand the linked list of physical memory at
	 * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
		ret = transport_allocate_resources(cmd);
		if (ret < 0)
			return ret;
	}

	ret = transport_get_sectors(cmd);
	if (ret < 0)
		return ret;

	ret = transport_new_cmd_obj(cmd);
	if (ret < 0)
		return ret;

	/*
	 * Determine if the calling TCM fabric module is talking to
	 * Linux/NET via kernel sockets and needs to allocate a
	 * struct iovec array to complete the struct se_cmd
	 */
	se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
	if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
		ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
		if (ret < 0)
			return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
	}

	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
		list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
			if (atomic_read(&task->task_sent))
				continue;
			if (!dev->transport->map_task_SG)
				continue;

			ret = dev->transport->map_task_SG(task);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = transport_map_control_cmd_to_task(cmd);
		if (ret < 0)
			return ret;
	}

	/*
	 * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
	 * This WRITE struct se_cmd (and all of its associated struct se_task's)
	 * will be added to the struct se_device execution queue after its WRITE
	 * data has arrived. (ie: It gets handled by the transport processing
	 * thread a second time)
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		transport_add_tasks_to_state_queue(cmd);
		return transport_generic_write_pending(cmd);
	}

	/*
	 * Everything else but a WRITE, add the struct se_cmd's struct se_task's
	 * to the execution queue.
	 */
	transport_execute_tasks(cmd);
	return 0;
}
/* transport_generic_process_write():
 *
 *
 */
void transport_generic_process_write(struct se_cmd *cmd)
{
#if 0
	/*
	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
	 * original EDTL
	 */
	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (!T_TASK(cmd)->t_tasks_se_num) {
			unsigned char *dst, *buf =
				(unsigned char *)T_TASK(cmd)->t_task_buf;

			dst = kzalloc(cmd->cmd_spdtl, GFP_KERNEL);
			if (!(dst)) {
				printk(KERN_ERR "Unable to allocate memory for"
						" WRITE underflow\n");
				transport_generic_request_failure(cmd, NULL,
						PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				return;
			}
			memcpy(dst, buf, cmd->cmd_spdtl);

			kfree(T_TASK(cmd)->t_task_buf);
			T_TASK(cmd)->t_task_buf = dst;
		} else {
			struct scatterlist *sg =
				(struct scatterlist *)T_TASK(cmd)->t_task_buf;
			struct scatterlist *orig_sg;

			orig_sg = kzalloc(sizeof(struct scatterlist) *
					T_TASK(cmd)->t_tasks_se_num,
					GFP_KERNEL);
			if (!(orig_sg)) {
				printk(KERN_ERR "Unable to allocate memory"
						" for WRITE underflow\n");
				transport_generic_request_failure(cmd, NULL,
						PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				return;
			}

			memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
					sizeof(struct scatterlist) *
					T_TASK(cmd)->t_tasks_se_num);

			cmd->data_length = cmd->cmd_spdtl;
			/*
			 * FIXME, clear out original struct se_task and state
			 * information.
			 */
			if (transport_generic_new_cmd(cmd) < 0) {
				transport_generic_request_failure(cmd, NULL,
					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
				kfree(orig_sg);
				return;
			}

			transport_memcpy_write_sg(cmd, orig_sg);
		}
	}
#endif
	transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
/* transport_generic_write_pending():
 *
 *
 */
static int transport_generic_write_pending(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * For the TCM control CDBs using a contiguous buffer, do the memcpy
	 * from the passed Linux/SCSI struct scatterlist located at
	 * T_TASK(cmd)->t_task_pt_sgl to the contiguous buffer at
	 * T_TASK(cmd)->t_task_buf.
	 */
	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
		transport_memcpy_read_contig(cmd,
				T_TASK(cmd)->t_task_buf,
				T_TASK(cmd)->t_task_pt_sgl);
	/*
	 * Clear the se_cmd for WRITE_PENDING status in order to set
	 * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
	 * can be called from HW target mode interrupt code.  This is safe
	 * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
	 * because the se_cmd->se_lun pointer is not being cleared.
	 */
	transport_cmd_check_stop(cmd, 1, 0);

	/*
	 * Call the fabric write_pending function here to let the
	 * frontend know that WRITE buffers are ready.
	 */
	ret = CMD_TFO(cmd)->write_pending(cmd);
	if (ret < 0)
		return ret;

	return PYX_TRANSPORT_WRITE_PENDING;
}
/* transport_release_cmd_to_pool():
 *
 *
 */
void transport_release_cmd_to_pool(struct se_cmd *cmd)
{
	BUG_ON(!T_TASK(cmd));
	BUG_ON(!CMD_TFO(cmd));

	transport_free_se_cmd(cmd);
	CMD_TFO(cmd)->release_cmd_to_pool(cmd);
}
EXPORT_SYMBOL(transport_release_cmd_to_pool);
/* transport_generic_free_cmd():
 *
 *	Called from processing frontend to release storage engine resources
 */
void transport_generic_free_cmd(
	struct se_cmd *cmd,
	int wait_for_tasks,
	int release_to_pool,
	int session_reinstatement)
{
	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
		transport_release_cmd_to_pool(cmd);
	else {
		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);

		if (SE_LUN(cmd)) {
#if 0
			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
				" SE_LUN(cmd)\n", cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
#endif
			transport_lun_remove_cmd(cmd);
		}

		if (wait_for_tasks && cmd->transport_wait_for_tasks)
			cmd->transport_wait_for_tasks(cmd, 0, 0);

		transport_free_dev_tasks(cmd);

		transport_generic_remove(cmd, release_to_pool,
				session_reinstatement);
	}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
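
/*
 * No-op wait_for_tasks handler for fabrics that do not need to wait on
 * outstanding tasks before command release.
 */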
static void transport_nop_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	return;
}
/* transport_lun_wait_for_tasks():
 *
 *	Called from ConfigFS context to stop the passed struct se_cmd to allow
 *	a struct se_lun to be successfully shutdown.
 */
static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
{
	unsigned long flags;
	int ret;
	/*
	 * If the frontend has already requested this struct se_cmd to
	 * be stopped, we can safely ignore this struct se_cmd.
	 */
	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
			" TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		transport_cmd_check_stop(cmd, 1, 0);
		return -1;
	}
	atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

	ret = transport_stop_tasks_for_cmd(cmd);

	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
			" %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
	if (!ret) {
		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
				CMD_TFO(cmd)->get_task_tag(cmd));
		wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
				CMD_TFO(cmd)->get_task_tag(cmd));
	}
	transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);

	return 0;
}
/* #define DEBUG_CLEAR_LUN */
#ifdef DEBUG_CLEAR_LUN
#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
#else
#define DEBUG_CLEAR_L(x...)
#endif
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct se_cmd *cmd = NULL;
	unsigned long lun_flags, cmd_flags;
	/*
	 * Do exception processing and return CHECK_CONDITION status to the
	 * Initiator Port.
	 */
	spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	while (!list_empty_careful(&lun->lun_cmd_list)) {
		cmd = list_entry(lun->lun_cmd_list.next,
			struct se_cmd, se_lun_list);
		list_del(&cmd->se_lun_list);

		if (!(T_TASK(cmd))) {
			printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
				" [i,t]_state: %u/%u\n",
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
		/*
		 * This will notify iscsi_target_transport.c:
		 * transport_cmd_check_stop() that a LUN shutdown is in
		 * progress for the iscsi_cmd_t.
		 */
		spin_lock(&T_TASK(cmd)->t_state_lock);
		DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
			"_lun_stop for ITT: 0x%08x\n",
			SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));
		atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
		spin_unlock(&T_TASK(cmd)->t_state_lock);

		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);

		if (!(SE_LUN(cmd))) {
			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
			BUG();
		}
		/*
		 * If the Storage engine still owns the iscsi_cmd_t, determine
		 * and/or stop its context.
		 */
		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
			"_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));

		if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}

		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
			"_wait_for_tasks(): SUCCESS\n",
			SE_LUN(cmd)->unpacked_lun,
			CMD_TFO(cmd)->get_task_tag(cmd));

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
		if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
			goto check_cond;
		}
		atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
		transport_all_task_dev_remove_state(cmd);
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);

		transport_free_dev_tasks(cmd);
		/*
		 * The Storage engine stopped this struct se_cmd before it was
		 * sent to the fabric frontend for delivery back to the
		 * Initiator Node.  Return this SCSI CDB back with a
		 * CHECK_CONDITION status.
		 */
check_cond:
		transport_send_check_condition_and_sense(cmd,
				TCM_NON_EXISTENT_LUN, 0);
		/*
		 * If the fabric frontend is waiting for this iscsi_cmd_t to
		 * be released, notify the waiting thread now that LU has
		 * finished accessing it.
		 */
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
		if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
				" struct se_cmd: %p ITT: 0x%08x\n",
				lun->unpacked_lun,
				cmd, CMD_TFO(cmd)->get_task_tag(cmd));

			spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
					cmd_flags);
			transport_cmd_check_stop(cmd, 1, 0);
			complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
			continue;
		}
		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
			lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));

		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
		spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
	}
	spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
}
static int transport_clear_lun_thread(void *p)
{
	struct se_lun *lun = (struct se_lun *)p;

	__transport_clear_lun_from_sessions(lun);
	complete(&lun->lun_shutdown_comp);

	return 0;
}
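
/*
 * Spawn a kthread to quiesce and flush all commands referencing the
 * passed struct se_lun, and wait for LUN shutdown to complete.
 */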
int transport_clear_lun_from_sessions(struct se_lun *lun)
{
	struct task_struct *kt;

	kt = kthread_run(transport_clear_lun_thread, (void *)lun,
			"tcm_cl_%u", lun->unpacked_lun);
	if (IS_ERR(kt)) {
		printk(KERN_ERR "Unable to start clear_lun thread\n");
		return -1;
	}
	wait_for_completion(&lun->lun_shutdown_comp);

	return 0;
}
/* transport_generic_wait_for_tasks():
 *
 *	Called from frontend or passthrough context to wait for storage engine
 *	to pause and/or release frontend generated struct se_cmd.
 */
static void transport_generic_wait_for_tasks(
	struct se_cmd *cmd,
	int remove_cmd,
	int session_reinstatement)
{
	unsigned long flags;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
		return;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	/*
	 * If we are already stopped due to an external event (ie: LUN shutdown)
	 * sleep until the connection can have the passed struct se_cmd back.
	 * The T_TASK(cmd)->transport_lun_fe_stop_comp will be completed by
	 * transport_clear_lun_from_sessions() once the ConfigFS context caller
	 * has completed its operation on the struct se_cmd.
	 */
	if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {

		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe"
			"_stop_comp); for ITT: 0x%08x\n",
			CMD_TFO(cmd)->get_task_tag(cmd));
		/*
		 * There is a special case for WRITES where a FE exception +
		 * LUN shutdown means ConfigFS context is still sleeping on
		 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
		 * We go ahead and up transport_lun_stop_comp just to be sure
		 * here.
		 */
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		complete(&T_TASK(cmd)->transport_lun_stop_comp);
		wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

		transport_all_task_dev_remove_state(cmd);
		/*
		 * At this point, the frontend who was the originator of this
		 * struct se_cmd, now owns the structure and can be released through
		 * normal means below.
		 */
		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
			" wait_for_completion(&T_TASK(cmd)->transport_lun_fe_"
			"stop_comp); for ITT: 0x%08x\n",
			CMD_TFO(cmd)->get_task_tag(cmd));

		atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
	}
	if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
	     atomic_read(&T_TASK(cmd)->t_transport_aborted))
		goto remove;

	atomic_set(&T_TASK(cmd)->t_transport_stop, 1);

	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
		" = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
		CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
		cmd->deferred_t_state);

	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);

	wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	atomic_set(&T_TASK(cmd)->t_transport_active, 0);
	atomic_set(&T_TASK(cmd)->t_transport_stop, 0);

	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_completion("
		"&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
		CMD_TFO(cmd)->get_task_tag(cmd));
remove:
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
	if (!remove_cmd)
		return;

	transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
}
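
/*
 * Accessors for the additional sense code (ASC) and additional sense
 * code qualifier (ASCQ) values carried in struct se_cmd.
 */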
static int transport_get_sense_codes(
	struct se_cmd *cmd,
	u8 *asc,
	u8 *ascq)
{
	*asc = cmd->scsi_asc;
	*ascq = cmd->scsi_ascq;

	return 0;
}

static int transport_set_sense_codes(
	struct se_cmd *cmd,
	u8 asc,
	u8 ascq)
{
	cmd->scsi_asc = asc;
	cmd->scsi_ascq = ascq;

	return 0;
}
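
/*
 * Build SPC-3 fixed format sense data for the passed reason code and
 * queue CHECK_CONDITION status back through the fabric module.
 */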
int transport_send_check_condition_and_sense(
	struct se_cmd *cmd,
	u8 reason,
	int from_transport)
{
	unsigned char *buffer = cmd->sense_buffer;
	unsigned long flags;
	int offset;
	u8 asc = 0, ascq = 0;

	spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);

	if (!reason && from_transport)
		goto after_reason;

	if (!from_transport)
		cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	/*
	 * Data Segment and SenseLength of the fabric response PDU.
	 *
	 * TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
	 * from include/scsi/scsi_cmnd.h
	 */
	offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
				TRANSPORT_SENSE_BUFFER);
	/*
	 * Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
	 * SENSE KEY values from include/scsi/scsi.h
	 */
	switch (reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_SECTOR_COUNT_TOO_MANY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID COMMAND OPERATION CODE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x20;
		break;
	case TCM_UNKNOWN_MODE_PAGE:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_CHECK_CONDITION_ABORT_CMD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* BUS DEVICE RESET FUNCTION OCCURRED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x29;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x03;
		break;
	case TCM_INCORRECT_AMOUNT_OF_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* NOT ENOUGH UNSOLICITED DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0d;
		break;
	case TCM_INVALID_CDB_FIELD:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN CDB */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
		break;
	case TCM_INVALID_PARAMETER_LIST:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* INVALID FIELD IN PARAMETER LIST */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
		break;
	case TCM_UNEXPECTED_UNSOLICITED_DATA:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* WRITE ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x0c;
		/* UNEXPECTED_UNSOLICITED_DATA */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x0c;
		break;
	case TCM_SERVICE_CRC_ERROR:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* PROTOCOL SERVICE CRC ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x47;
		/* N/A */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x05;
		break;
	case TCM_SNACK_REJECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ABORTED COMMAND */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
		/* READ ERROR */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x11;
		/* FAILED RETRANSMISSION REQUEST */
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = 0x13;
		break;
	case TCM_WRITE_PROTECTED:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* DATA PROTECT */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
		/* WRITE PROTECTED */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
		break;
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* UNIT ATTENTION */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
		core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_CHECK_CONDITION_NOT_READY:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* Not Ready */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
		transport_get_sense_codes(cmd, &asc, &ascq);
		buffer[offset+SPC_ASC_KEY_OFFSET] = asc;
		buffer[offset+SPC_ASCQ_KEY_OFFSET] = ascq;
		break;
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	default:
		/* CURRENT ERROR */
		buffer[offset] = 0x70;
		buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
		/* ILLEGAL REQUEST */
		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
		/* LOGICAL UNIT COMMUNICATION FAILURE */
		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x80;
		break;
	}
	/*
	 * This code uses linux/include/scsi/scsi.h SAM status codes!
	 */
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	/*
	 * Automatically padded, this value is encoded in the fabric's
	 * data_length response PDU containing the SCSI defined sense data.
	 */
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;

after_reason:
	CMD_TFO(cmd)->queue_status(cmd);
	return 0;
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
	int ret = 0;

	if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
		if (!(send_status) ||
		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
			return 1;
#if 0
		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
			" status for CDB: 0x%02x ITT: 0x%08x\n",
			T_TASK(cmd)->t_task_cdb[0],
			CMD_TFO(cmd)->get_task_tag(cmd));
#endif
		cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
		CMD_TFO(cmd)->queue_status(cmd);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_check_aborted_status);
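
/*
 * Queue SAM_STAT_TASK_ABORTED status for the passed struct se_cmd,
 * deferring the response while fabric WRITE data is still expected.
 */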
void transport_send_task_abort(struct se_cmd *cmd)
{
	/*
	 * If there are still expected incoming fabric WRITEs, we wait
	 * until they have completed before sending a TASK_ABORTED
	 * response.  This response with TASK_ABORTED status will be
	 * queued back to fabric module by transport_check_aborted_status().
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
			atomic_inc(&T_TASK(cmd)->t_transport_aborted);
			smp_mb__after_atomic_inc();
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			transport_new_cmd_failure(cmd);
			return;
		}
	}
	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
		" ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
		CMD_TFO(cmd)->get_task_tag(cmd));
#endif
	CMD_TFO(cmd)->queue_status(cmd);
}
/* transport_generic_do_tmr():
 *
 *
 */
int transport_generic_do_tmr(struct se_cmd *cmd)
{
	struct se_cmd *ref_cmd;
	struct se_device *dev = SE_DEV(cmd);
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		ref_cmd = tmr->ref_cmd;
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		printk(KERN_ERR "Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
	CMD_TFO(cmd)->queue_tm_rsp(cmd);

	transport_cmd_check_stop(cmd, 2, 0);
	return 0;
}
/*
 * Called with spin_lock_irq(&dev->execute_task_lock); held
 *
 */
static struct se_task *
transport_get_task_from_state_list(struct se_device *dev)
{
	struct se_task *task;

	if (list_empty(&dev->state_task_list))
		return NULL;

	list_for_each_entry(task, &dev->state_task_list, t_state_list)
		break;

	list_del(&task->t_state_list);
	atomic_set(&task->task_state_active, 0);

	return task;
}
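
/*
 * Quiesce a struct se_device for shutdown: stop every active task on the
 * device state list, then drain and fail any commands still sitting on
 * the device queue.
 */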
static void transport_processing_shutdown(struct se_device *dev)
{
	struct se_cmd *cmd;
	struct se_queue_req *qr;
	struct se_task *task;
	u8 state;
	unsigned long flags;
	/*
	 * Empty the struct se_device's struct se_task state list.
	 */
	spin_lock_irqsave(&dev->execute_task_lock, flags);
	while ((task = transport_get_task_from_state_list(dev))) {
		if (!(TASK_CMD(task))) {
			printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
			continue;
		}
		cmd = TASK_CMD(task);

		if (!T_TASK(cmd)) {
			printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
				" %p ITT: 0x%08x\n", task, cmd,
				CMD_TFO(cmd)->get_task_tag(cmd));
			continue;
		}
		spin_unlock_irqrestore(&dev->execute_task_lock, flags);

		spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);

		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
			" %d/%d cdb: 0x%02x\n", cmd, task,
			CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
			CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
			cmd->t_state, cmd->deferred_t_state,
			T_TASK(cmd)->t_task_cdb[0]);
		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
			" t_transport_stop: %d t_transport_sent: %d\n",
			CMD_TFO(cmd)->get_task_tag(cmd),
			T_TASK(cmd)->t_task_cdbs,
			atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
			atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
			atomic_read(&T_TASK(cmd)->t_transport_active),
			atomic_read(&T_TASK(cmd)->t_transport_stop),
			atomic_read(&T_TASK(cmd)->t_transport_sent));

		if (atomic_read(&task->task_active)) {
			atomic_set(&task->task_stop, 1);
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
				" %p\n", task, dev);
			wait_for_completion(&task->task_stop_comp);
			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
				task, dev);

			spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
			atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);

			atomic_set(&task->task_active, 0);
			atomic_set(&task->task_stop, 0);
		} else {
			if (atomic_read(&task->task_execute_queue) != 0)
				transport_remove_task_from_execute_queue(task, dev);
		}
		__transport_stop_task_timer(task, &flags);

		if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			DEBUG_DO("Skipping task: %p, dev: %p for"
				" t_task_cdbs_ex_left: %d\n", task, dev,
				atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}

		if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
				" %p\n", task, dev);

			if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);
				transport_send_check_condition_and_sense(
					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
					0);
				transport_remove_cmd_from_queue(cmd,
					SE_DEV(cmd)->dev_queue_obj);

				transport_lun_remove_cmd(cmd);
				transport_cmd_check_stop(cmd, 1, 0);
			} else {
				spin_unlock_irqrestore(
					&T_TASK(cmd)->t_state_lock, flags);

				transport_remove_cmd_from_queue(cmd,
					SE_DEV(cmd)->dev_queue_obj);

				transport_lun_remove_cmd(cmd);

				if (transport_cmd_check_stop(cmd, 1, 0))
					transport_generic_remove(cmd, 0, 0);
			}

			spin_lock_irqsave(&dev->execute_task_lock, flags);
			continue;
		}
		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
				task, dev);

		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
			transport_remove_cmd_from_queue(cmd,
				SE_DEV(cmd)->dev_queue_obj);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			spin_unlock_irqrestore(
				&T_TASK(cmd)->t_state_lock, flags);

			transport_remove_cmd_from_queue(cmd,
				SE_DEV(cmd)->dev_queue_obj);
			transport_lun_remove_cmd(cmd);

			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0, 0);
		}

		spin_lock_irqsave(&dev->execute_task_lock, flags);
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
	/*
	 * Empty the struct se_device's struct se_cmd list.
	 */
	spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
		spin_unlock_irqrestore(
				&dev->dev_queue_obj->cmd_queue_lock, flags);
		cmd = (struct se_cmd *)qr->cmd;
		state = qr->state;
		kfree(qr);

		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
				cmd, state);

		if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
			transport_send_check_condition_and_sense(cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop(cmd, 1, 0);
		} else {
			transport_lun_remove_cmd(cmd);
			if (transport_cmd_check_stop(cmd, 1, 0))
				transport_generic_remove(cmd, 0, 0);
		}
		spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
	}
	spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
/* transport_processing_thread():
 *
 *
 */
static int transport_processing_thread(void *param)
{
	int ret, t_state;
	struct se_cmd *cmd;
	struct se_device *dev = (struct se_device *) param;
	struct se_queue_req *qr;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
				atomic_read(&dev->dev_queue_obj->queue_cnt) ||
				kthread_should_stop());
		if (ret < 0)
			goto out;

		spin_lock_irq(&dev->dev_status_lock);
		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) {
			spin_unlock_irq(&dev->dev_status_lock);
			transport_processing_shutdown(dev);
			continue;
		}
		spin_unlock_irq(&dev->dev_status_lock);

get_cmd:
		__transport_execute_tasks(dev);

		qr = transport_get_qr_from_queue(dev->dev_queue_obj);
		if (!(qr))
			continue;

		cmd = (struct se_cmd *)qr->cmd;
		t_state = qr->state;
		kfree(qr);

		switch (t_state) {
		case TRANSPORT_NEW_CMD_MAP:
			if (!(CMD_TFO(cmd)->new_cmd_map)) {
				printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
					" NULL for TRANSPORT_NEW_CMD_MAP\n");
				BUG();
			}
			ret = CMD_TFO(cmd)->new_cmd_map(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
						0, (cmd->data_direction !=
						    DMA_TO_DEVICE));
				break;
			}
			/* Fall through */
		case TRANSPORT_NEW_CMD:
			ret = transport_generic_new_cmd(cmd);
			if (ret < 0) {
				cmd->transport_error_status = ret;
				transport_generic_request_failure(cmd, NULL,
					0, (cmd->data_direction !=
					    DMA_TO_DEVICE));
			}
			break;
		case TRANSPORT_PROCESS_WRITE:
			transport_generic_process_write(cmd);
			break;
		case TRANSPORT_COMPLETE_OK:
			transport_stop_all_task_timers(cmd);
			transport_generic_complete_ok(cmd);
			break;
		case TRANSPORT_REMOVE:
			transport_generic_remove(cmd, 1, 0);
			break;
		case TRANSPORT_FREE_CMD_INTR:
			transport_generic_free_cmd(cmd, 0, 1, 0);
			break;
		case TRANSPORT_PROCESS_TMR:
			transport_generic_do_tmr(cmd);
			break;
		case TRANSPORT_COMPLETE_FAILURE:
			transport_generic_request_failure(cmd, NULL, 1, 1);
			break;
		case TRANSPORT_COMPLETE_TIMEOUT:
			transport_stop_all_task_timers(cmd);
			transport_generic_request_timeout(cmd);
			break;
		default:
			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
				" %u\n", t_state, cmd->deferred_t_state,
				CMD_TFO(cmd)->get_task_tag(cmd),
				CMD_TFO(cmd)->get_cmd_state(cmd),
				SE_LUN(cmd)->unpacked_lun);
			BUG();
		}

		goto get_cmd;
	}

out:
	transport_release_all_cmds(dev);
	dev->process_thread = NULL;
	return 0;
}