/*
 * dc395x.c
 *
 * Device Driver for Tekram DC395(U/UW/F), DC315(U)
 * PCI SCSI Bus Master Host Adapter
 * (SCSI chip set used Tekram ASIC TRM-S1040)
 *
 * Authors:
 *  C.L. Huang <ching@tekram.com.tw>
 *  Erich Chen <erich@tekram.com.tw>
 *  (C) Copyright 1995-1999 Tekram Technology Co., Ltd.
 *
 *  Kurt Garloff <garloff@suse.de>
 *  (C) 1999-2000 Kurt Garloff
 *
 *  Oliver Neukum <oliver@neukum.name>
 *  Ali Akcaagac <aliakc@web.de>
 *  Jamie Lenehan <lenehan@twibble.org>
 *  (C) 2003
 *
 * License: GNU GPL
 *
 *************************************************************************
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ************************************************************************
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsicam.h>	/* needed for scsicam_bios_param */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "dc395x.h"
  68. #define DC395X_NAME "dc395x"
  69. #define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
  70. #define DC395X_VERSION "v2.05, 2004/03/08"
  71. /*---------------------------------------------------------------------------
  72. Features
  73. ---------------------------------------------------------------------------*/
  74. /*
  75. * Set to disable parts of the driver
  76. */
  77. /*#define DC395x_NO_DISCONNECT*/
  78. /*#define DC395x_NO_TAGQ*/
  79. /*#define DC395x_NO_SYNC*/
  80. /*#define DC395x_NO_WIDE*/
  81. /*---------------------------------------------------------------------------
  82. Debugging
  83. ---------------------------------------------------------------------------*/
  84. /*
  85. * Types of debugging that can be enabled and disabled
  86. */
  87. #define DBG_KG 0x0001
  88. #define DBG_0 0x0002
  89. #define DBG_1 0x0004
  90. #define DBG_SG 0x0020
  91. #define DBG_FIFO 0x0040
  92. #define DBG_PIO 0x0080
  93. /*
  94. * Set set of things to output debugging for.
  95. * Undefine to remove all debugging
  96. */
  97. /*#define DEBUG_MASK (DBG_0|DBG_1|DBG_SG|DBG_FIFO|DBG_PIO)*/
  98. /*#define DEBUG_MASK DBG_0*/
  99. /*
  100. * Output a kernel mesage at the specified level and append the
  101. * driver name and a ": " to the start of the message
  102. */
  103. #define dprintkl(level, format, arg...) \
  104. printk(level DC395X_NAME ": " format , ## arg)
  105. #ifdef DEBUG_MASK
  106. /*
  107. * print a debug message - this is formated with KERN_DEBUG, then the
  108. * driver name followed by a ": " and then the message is output.
  109. * This also checks that the specified debug level is enabled before
  110. * outputing the message
  111. */
  112. #define dprintkdbg(type, format, arg...) \
  113. do { \
  114. if ((type) & (DEBUG_MASK)) \
  115. dprintkl(KERN_DEBUG , format , ## arg); \
  116. } while (0)
  117. /*
  118. * Check if the specified type of debugging is enabled
  119. */
  120. #define debug_enabled(type) ((DEBUG_MASK) & (type))
  121. #else
  122. /*
  123. * No debugging. Do nothing
  124. */
  125. #define dprintkdbg(type, format, arg...) \
  126. do {} while (0)
  127. #define debug_enabled(type) (0)
  128. #endif
  129. #ifndef PCI_VENDOR_ID_TEKRAM
  130. #define PCI_VENDOR_ID_TEKRAM 0x1DE1 /* Vendor ID */
  131. #endif
  132. #ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
  133. #define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391 /* Device ID */
  134. #endif
  135. #define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
  136. #define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
  137. #define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
  138. #define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
  139. #define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
  140. #define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
  141. #define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
  142. #define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
  143. /* cmd->result */
  144. #define RES_TARGET 0x000000FF /* Target State */
  145. #define RES_TARGET_LNX STATUS_MASK /* Only official ... */
  146. #define RES_ENDMSG 0x0000FF00 /* End Message */
  147. #define RES_DID 0x00FF0000 /* DID_ codes */
  148. #define RES_DRV 0xFF000000 /* DRIVER_ codes */
  149. #define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
  150. #define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
  151. #define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
  152. #define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
  153. #define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
  154. #define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
  155. #define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
  156. #define TAG_NONE 255
  157. /*
  158. * srb->segement_x is the hw sg list. It is always allocated as a
  159. * DC395x_MAX_SG_LISTENTRY entries in a linear block which does not
  160. * cross a page boundy.
  161. */
  162. #define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
  163. struct SGentry {
  164. u32 address; /* bus! address */
  165. u32 length;
  166. };
  167. /* The SEEPROM structure for TRM_S1040 */
  168. struct NVRamTarget {
  169. u8 cfg0; /* Target configuration byte 0 */
  170. u8 period; /* Target period */
  171. u8 cfg2; /* Target configuration byte 2 */
  172. u8 cfg3; /* Target configuration byte 3 */
  173. };
  174. struct NvRamType {
  175. u8 sub_vendor_id[2]; /* 0,1 Sub Vendor ID */
  176. u8 sub_sys_id[2]; /* 2,3 Sub System ID */
  177. u8 sub_class; /* 4 Sub Class */
  178. u8 vendor_id[2]; /* 5,6 Vendor ID */
  179. u8 device_id[2]; /* 7,8 Device ID */
  180. u8 reserved; /* 9 Reserved */
  181. struct NVRamTarget target[DC395x_MAX_SCSI_ID];
  182. /** 10,11,12,13
  183. ** 14,15,16,17
  184. ** ....
  185. ** ....
  186. ** 70,71,72,73
  187. */
  188. u8 scsi_id; /* 74 Host Adapter SCSI ID */
  189. u8 channel_cfg; /* 75 Channel configuration */
  190. u8 delay_time; /* 76 Power on delay time */
  191. u8 max_tag; /* 77 Maximum tags */
  192. u8 reserved0; /* 78 */
  193. u8 boot_target; /* 79 */
  194. u8 boot_lun; /* 80 */
  195. u8 reserved1; /* 81 */
  196. u16 reserved2[22]; /* 82,..125 */
  197. u16 cksum; /* 126,127 */
  198. };
  199. struct ScsiReqBlk {
  200. struct list_head list; /* next/prev ptrs for srb lists */
  201. struct DeviceCtlBlk *dcb;
  202. struct scsi_cmnd *cmd;
  203. struct SGentry *segment_x; /* Linear array of hw sg entries (up to 64 entries) */
  204. dma_addr_t sg_bus_addr; /* Bus address of sg list (ie, of segment_x) */
  205. u8 sg_count; /* No of HW sg entries for this request */
  206. u8 sg_index; /* Index of HW sg entry for this request */
  207. size_t total_xfer_length; /* Total number of bytes remaining to be transferred */
  208. size_t request_length; /* Total number of bytes in this request */
  209. /*
  210. * The sense buffer handling function, request_sense, uses
  211. * the first hw sg entry (segment_x[0]) and the transfer
  212. * length (total_xfer_length). While doing this it stores the
  213. * original values into the last sg hw list
  214. * (srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1] and the
  215. * total_xfer_length in xferred. These values are restored in
  216. * pci_unmap_srb_sense. This is the only place xferred is used.
  217. */
  218. size_t xferred; /* Saved copy of total_xfer_length */
  219. u16 state;
  220. u8 msgin_buf[6];
  221. u8 msgout_buf[6];
  222. u8 adapter_status;
  223. u8 target_status;
  224. u8 msg_count;
  225. u8 end_message;
  226. u8 tag_number;
  227. u8 status;
  228. u8 retry_count;
  229. u8 flag;
  230. u8 scsi_phase;
  231. };
  232. struct DeviceCtlBlk {
  233. struct list_head list; /* next/prev ptrs for the dcb list */
  234. struct AdapterCtlBlk *acb;
  235. struct list_head srb_going_list; /* head of going srb list */
  236. struct list_head srb_waiting_list; /* head of waiting srb list */
  237. struct ScsiReqBlk *active_srb;
  238. u32 tag_mask;
  239. u16 max_command;
  240. u8 target_id; /* SCSI Target ID (SCSI Only) */
  241. u8 target_lun; /* SCSI Log. Unit (SCSI Only) */
  242. u8 identify_msg;
  243. u8 dev_mode;
  244. u8 inquiry7; /* To store Inquiry flags */
  245. u8 sync_mode; /* 0:async mode */
  246. u8 min_nego_period; /* for nego. */
  247. u8 sync_period; /* for reg. */
  248. u8 sync_offset; /* for reg. and nego.(low nibble) */
  249. u8 flag;
  250. u8 dev_type;
  251. u8 init_tcq_flag;
  252. };
  253. struct AdapterCtlBlk {
  254. struct Scsi_Host *scsi_host;
  255. unsigned long io_port_base;
  256. unsigned long io_port_len;
  257. struct list_head dcb_list; /* head of going dcb list */
  258. struct DeviceCtlBlk *dcb_run_robin;
  259. struct DeviceCtlBlk *active_dcb;
  260. struct list_head srb_free_list; /* head of free srb list */
  261. struct ScsiReqBlk *tmp_srb;
  262. struct timer_list waiting_timer;
  263. struct timer_list selto_timer;
  264. u16 srb_count;
  265. u8 sel_timeout;
  266. unsigned int irq_level;
  267. u8 tag_max_num;
  268. u8 acb_flag;
  269. u8 gmode2;
  270. u8 config;
  271. u8 lun_chk;
  272. u8 scan_devices;
  273. u8 hostid_bit;
  274. u8 dcb_map[DC395x_MAX_SCSI_ID];
  275. struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
  276. struct pci_dev *dev;
  277. u8 msg_len;
  278. struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
  279. struct ScsiReqBlk srb;
  280. struct NvRamType eeprom; /* eeprom settings for this adapter */
  281. };
  282. /*---------------------------------------------------------------------------
  283. Forward declarations
  284. ---------------------------------------------------------------------------*/
  285. static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  286. u16 *pscsi_status);
  287. static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  288. u16 *pscsi_status);
  289. static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  290. u16 *pscsi_status);
  291. static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  292. u16 *pscsi_status);
  293. static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  294. u16 *pscsi_status);
  295. static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  296. u16 *pscsi_status);
  297. static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  298. u16 *pscsi_status);
  299. static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  300. u16 *pscsi_status);
  301. static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  302. u16 *pscsi_status);
  303. static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  304. u16 *pscsi_status);
  305. static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  306. u16 *pscsi_status);
  307. static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  308. u16 *pscsi_status);
  309. static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  310. u16 *pscsi_status);
  311. static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  312. u16 *pscsi_status);
  313. static void set_basic_config(struct AdapterCtlBlk *acb);
  314. static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
  315. struct ScsiReqBlk *srb);
  316. static void reset_scsi_bus(struct AdapterCtlBlk *acb);
  317. static void data_io_transfer(struct AdapterCtlBlk *acb,
  318. struct ScsiReqBlk *srb, u16 io_dir);
  319. static void disconnect(struct AdapterCtlBlk *acb);
  320. static void reselect(struct AdapterCtlBlk *acb);
  321. static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  322. struct ScsiReqBlk *srb);
  323. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  324. struct ScsiReqBlk *srb);
  325. static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  326. struct ScsiReqBlk *srb);
  327. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
  328. struct scsi_cmnd *cmd, u8 force);
  329. static void scsi_reset_detect(struct AdapterCtlBlk *acb);
  330. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
  331. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  332. struct ScsiReqBlk *srb);
  333. static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  334. struct ScsiReqBlk *srb);
  335. static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  336. struct ScsiReqBlk *srb);
  337. static void set_xfer_rate(struct AdapterCtlBlk *acb,
  338. struct DeviceCtlBlk *dcb);
  339. static void waiting_timeout(unsigned long ptr);
  340. /*---------------------------------------------------------------------------
  341. Static Data
  342. ---------------------------------------------------------------------------*/
/*
 * NOTE(review): file-scope scratch, presumably the sync offset from the
 * last negotiation; shared by all adapter instances -- verify that it is
 * only touched under the host lock.
 */
static u16 current_sync_offset = 0;
/*
 * Jump table of "phase0" handlers, indexed by SCSI bus phase (0-7).
 * Phases 4 and 5 are not real information-transfer phases and map to
 * the no-op handler.
 */
static void *dc395x_scsi_phase0[] = {
	data_out_phase0,/* phase:0 */
	data_in_phase0,	/* phase:1 */
	command_phase0,	/* phase:2 */
	status_phase0,	/* phase:3 */
	nop0,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop0,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase0,	/* phase:6 */
	msgin_phase0,	/* phase:7 */
};
/*
 * Jump table of "phase1" handlers, indexed by SCSI bus phase (0-7);
 * same layout as dc395x_scsi_phase0 above.
 */
static void *dc395x_scsi_phase1[] = {
	data_out_phase1,/* phase:0 */
	data_in_phase1,	/* phase:1 */
	command_phase1,	/* phase:2 */
	status_phase1,	/* phase:3 */
	nop1,		/* phase:4 PH_BUS_FREE .. initial phase */
	nop1,		/* phase:5 PH_BUS_FREE .. initial phase */
	msgout_phase1,	/* phase:6 */
	msgin_phase1,	/* phase:7 */
};
  364. /*
  365. *Fast20: 000 50ns, 20.0 MHz
  366. * 001 75ns, 13.3 MHz
  367. * 010 100ns, 10.0 MHz
  368. * 011 125ns, 8.0 MHz
  369. * 100 150ns, 6.6 MHz
  370. * 101 175ns, 5.7 MHz
  371. * 110 200ns, 5.0 MHz
  372. * 111 250ns, 4.0 MHz
  373. *
  374. *Fast40(LVDS): 000 25ns, 40.0 MHz
  375. * 001 50ns, 20.0 MHz
  376. * 010 75ns, 13.3 MHz
  377. * 011 100ns, 10.0 MHz
  378. * 100 125ns, 8.0 MHz
  379. * 101 150ns, 6.6 MHz
  380. * 110 175ns, 5.7 MHz
  381. * 111 200ns, 5.0 MHz
  382. */
  383. /*static u8 clock_period[] = {12,19,25,31,37,44,50,62};*/
  384. /* real period:48ns,76ns,100ns,124ns,148ns,176ns,200ns,248ns */
/* Chip clock-period register values, indexed by the 3-bit speed code
 * from the eeprom (see the Fast20/Fast40 table above). */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
/* Matching bus speeds, apparently in units of 100 kHz (200 == 20.0 MHz). */
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
  387. /*---------------------------------------------------------------------------
  388. Configuration
  389. ---------------------------------------------------------------------------*/
  390. /*
 * Module/boot parameters currently affect *all* instances of the
  392. * card in the system.
  393. */
  394. /*
  395. * Command line parameters are stored in a structure below.
 * These are the indexes into the structure for the various
  397. * command line options.
  398. */
/* Indexes into cfg_data[], one per command line option. */
#define CFG_ADAPTER_ID 0
#define CFG_MAX_SPEED 1
#define CFG_DEV_MODE 2
#define CFG_ADAPTER_MODE 3
#define CFG_TAGS 4
#define CFG_RESET_DELAY 5
#define CFG_NUM 6 /* number of configuration items */
  406. /*
  407. * Value used to indicate that a command line override
  408. * hasn't been used to modify the value.
  409. */
  410. #define CFG_PARAM_UNSET -1
  411. /*
  412. * Hold command line parameters.
  413. */
/*
 * One command line option: the current value plus the legal range and
 * the default/safe fallbacks used by fix_settings()/set_safe_settings().
 */
struct ParameterData {
	int value;		/* value of this setting */
	int min;		/* minimum value */
	int max;		/* maximum value */
	int def;		/* default value */
	int safe;		/* safe value */
};
/* Per-option {value, min, max, default, safe}, indexed by CFG_*. */
static struct ParameterData __devinitdata cfg_data[] = {
	{			/* adapter id */
		CFG_PARAM_UNSET,
		0,
		15,
		7,
		7
	},
	{			/* max speed */
		CFG_PARAM_UNSET,
		0,
		7,
		1,	/* 13.3Mhz */
		4,	/* 6.7Mhz */
	},
	{			/* dev mode */
		CFG_PARAM_UNSET,
		0,
		0x3f,
		NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
			NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
			NTC_DO_SEND_START,
		NTC_DO_PARITY_CHK | NTC_DO_SEND_START
	},
	{			/* adapter mode */
		CFG_PARAM_UNSET,
		0,
		0x2f,
#ifdef CONFIG_SCSI_MULTI_LUN
		NAC_SCANLUN |
#endif
		NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
			/*| NAC_ACTIVE_NEG*/,
		NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
	},
	{			/* tags */
		CFG_PARAM_UNSET,
		0,
		5,
		3,	/* 16 tags (??) */
		2,
	},
	{			/* reset delay */
		CFG_PARAM_UNSET,
		0,
		180,
		1,	/* 1 second */
		10,	/* 10 seconds */
	}
};
  471. /*
  472. * Safe settings. If set to zero the BIOS/default values with
  473. * command line overrides will be used. If set to 1 then safe and
  474. * slow settings will be used.
  475. */
  476. static int use_safe_settings = 0;
  477. module_param_named(safe, use_safe_settings, bool, 0);
  478. MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
  479. module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
  480. MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
  481. module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
  482. MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
  483. module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
  484. MODULE_PARM_DESC(dev_mode, "Device mode.");
  485. module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
  486. MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
  487. module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
  488. MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
  489. module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
  490. MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
  491. /**
  492. * set_safe_settings - if the use_safe_settings option is set then
  493. * set all values to the safe and slow values.
  494. **/
  495. static void __devinit set_safe_settings(void)
  496. {
  497. if (use_safe_settings)
  498. {
  499. int i;
  500. dprintkl(KERN_INFO, "Using safe settings.\n");
  501. for (i = 0; i < CFG_NUM; i++)
  502. {
  503. cfg_data[i].value = cfg_data[i].safe;
  504. }
  505. }
  506. }
  507. /**
  508. * fix_settings - reset any boot parameters which are out of range
  509. * back to the default values.
  510. **/
  511. static void __devinit fix_settings(void)
  512. {
  513. int i;
  514. dprintkdbg(DBG_1,
  515. "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
  516. "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
  517. cfg_data[CFG_ADAPTER_ID].value,
  518. cfg_data[CFG_MAX_SPEED].value,
  519. cfg_data[CFG_DEV_MODE].value,
  520. cfg_data[CFG_ADAPTER_MODE].value,
  521. cfg_data[CFG_TAGS].value,
  522. cfg_data[CFG_RESET_DELAY].value);
  523. for (i = 0; i < CFG_NUM; i++)
  524. {
  525. if (cfg_data[i].value < cfg_data[i].min
  526. || cfg_data[i].value > cfg_data[i].max)
  527. cfg_data[i].value = cfg_data[i].def;
  528. }
  529. }
  530. /*
  531. * Mapping from the eeprom delay index value (index into this array)
  532. * to the number of actual seconds that the delay should be for.
  533. */
/* Delay in seconds for each of the eight eeprom delay index values. */
static char __devinitdata eeprom_index_to_delay_map[] =
	{ 1, 3, 5, 10, 16, 30, 60, 120 };
  536. /**
  537. * eeprom_index_to_delay - Take the eeprom delay setting and convert it
  538. * into a number of seconds.
  539. *
  540. * @eeprom: The eeprom structure in which we find the delay index to map.
  541. **/
static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
{
	/* Replace the stored delay *index* with the delay in seconds.
	 * NOTE(review): no bounds check -- assumes delay_time < 8 as read
	 * from the eeprom; confirm against the eeprom validation path. */
	eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
}
  546. /**
  547. * delay_to_eeprom_index - Take a delay in seconds and return the
  548. * closest eeprom index which will delay for at least that amount of
  549. * seconds.
  550. *
  551. * @delay: The delay, in seconds, to find the eeprom index for.
  552. **/
  553. static int __devinit delay_to_eeprom_index(int delay)
  554. {
  555. u8 idx = 0;
  556. while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
  557. idx++;
  558. return idx;
  559. }
  560. /**
  561. * eeprom_override - Override the eeprom settings, in the provided
  562. * eeprom structure, with values that have been set on the command
  563. * line.
  564. *
  565. * @eeprom: The eeprom data to override with command line options.
  566. **/
static void __devinit eeprom_override(struct NvRamType *eeprom)
{
	u8 id;

	/* Adapter Settings */
	if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
		eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;

	if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
		eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;

	if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
		/* the eeprom stores a delay index, not seconds */
		eeprom->delay_time = delay_to_eeprom_index(
			cfg_data[CFG_RESET_DELAY].value);

	if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
		eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;

	/* Device Settings: the overrides apply uniformly to every target */
	for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
		if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
			eeprom->target[id].cfg0 =
				(u8)cfg_data[CFG_DEV_MODE].value;

		if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
			eeprom->target[id].period =
				(u8)cfg_data[CFG_MAX_SPEED].value;
	}
}
  590. /*---------------------------------------------------------------------------
  591. ---------------------------------------------------------------------------*/
  592. static unsigned int list_size(struct list_head *head)
  593. {
  594. unsigned int count = 0;
  595. struct list_head *pos;
  596. list_for_each(pos, head)
  597. count++;
  598. return count;
  599. }
/*
 * Return the DCB that follows @pos in @head, wrapping to the first
 * entry when @pos is the last one (round-robin traversal).  If @pos is
 * not found the first entry is returned.  Returns NULL only when the
 * list is empty.
 */
static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
		struct DeviceCtlBlk *pos)
{
	int use_next = 0;
	struct DeviceCtlBlk* next = NULL;
	struct DeviceCtlBlk* i;

	if (list_empty(head))
		return NULL;

	/* find supplied dcb and then select the next one */
	list_for_each_entry(i, head, list)
		if (use_next) {
			next = i;
			break;
		} else if (i == pos) {
			use_next = 1;
		}
	/* if no next one take the head one (ie, wraparound) */
	if (!next)
		list_for_each_entry(i, head, list) {
			next = i;
			break;
		}

	return next;
}
  624. static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  625. {
  626. if (srb->tag_number < 255) {
  627. dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
  628. srb->tag_number = 255;
  629. }
  630. }
/* Find cmd in SRB list: linear scan, NULL when the command is not
 * queued on @head. */
static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
		struct list_head *head)
{
	struct ScsiReqBlk *i;

	list_for_each_entry(i, head, list)
		if (i->cmd == cmd)
			return i;
	return NULL;
}
  641. static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
  642. {
  643. struct list_head *head = &acb->srb_free_list;
  644. struct ScsiReqBlk *srb = NULL;
  645. if (!list_empty(head)) {
  646. srb = list_entry(head->next, struct ScsiReqBlk, list);
  647. list_del(head->next);
  648. dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
  649. }
  650. return srb;
  651. }
/* Return a finished SRB to the adapter's free list. */
static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
	list_add_tail(&srb->list, &acb->srb_free_list);
}
/* Put an SRB at the *front* of the device's waiting queue, e.g. when
 * start_scsi() failed and the request must be retried first. */
static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_add(&srb->list, &dcb->srb_waiting_list);
}
/* Append an SRB to the *tail* of the device's waiting queue (FIFO order). */
static void srb_waiting_append(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_waiting_list);
}
/* Append an SRB to the device's list of commands active on the bus. */
static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_add_tail(&srb->list, &dcb->srb_going_list);
}
  677. static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
  678. {
  679. struct ScsiReqBlk *i;
  680. struct ScsiReqBlk *tmp;
  681. dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
  682. srb->cmd, dcb->target_id, dcb->target_lun, srb);
  683. list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
  684. if (i == srb) {
  685. list_del(&srb->list);
  686. break;
  687. }
  688. }
  689. static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
  690. struct ScsiReqBlk *srb)
  691. {
  692. struct ScsiReqBlk *i;
  693. struct ScsiReqBlk *tmp;
  694. dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
  695. srb->cmd, dcb->target_id, dcb->target_lun, srb);
  696. list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
  697. if (i == srb) {
  698. list_del(&srb->list);
  699. break;
  700. }
  701. }
/* Move an SRB from the going (active) list back onto the waiting list. */
static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_waiting_list);
}
/* Move an SRB from the waiting list onto the going (active) list. */
static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	dprintkdbg(DBG_0,
		"srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
	list_move(&srb->list, &dcb->srb_going_list);
}
/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
	/* a wakeup is already queued; leave it alone */
	if (timer_pending(&acb->waiting_timer))
		return;
	init_timer(&acb->waiting_timer);
	acb->waiting_timer.function = waiting_timeout;
	acb->waiting_timer.data = (unsigned long) acb;
	/*
	 * Do not fire before the post-reset settle time (last_reset) has
	 * nearly elapsed; otherwise fire 'to' jiffies from now.
	 */
	if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2))
		acb->waiting_timer.expires =
		    acb->scsi_host->last_reset - HZ / 2 + 1;
	else
		acb->waiting_timer.expires = jiffies + to + 1;
	add_timer(&acb->waiting_timer);
}
/* Send the next command from the waiting list to the bus.
 * Round-robins over the DCBs starting at dcb_run_robin and starts at
 * most one SRB; devices that are idle or already at max_command
 * outstanding commands are skipped. */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	/* bus busy or a reset in progress: nothing can be started now */
	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Find the starting dcb. Need to find it again in the list
	 * since the list may have changed since we set the ptr to it
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* This can happen! */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}

	/*
	 * Loop over the dcbs, but we start somewhere (potentially) in
	 * the middle of the loop so we need to manually do this.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* Make sure another device gets scheduled next time ... */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);

		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* move to next dcb */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);

			/* Try to send to the bus */
			if (!start_scsi(acb, pos, srb))
				srb_waiting_to_going_move(pos, srb);
			else
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
/* Wake up waiting queue: timer callback installed by waiting_set_timer().
 * Runs without the host lock, so take it before touching the queues. */
static void waiting_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	dprintkdbg(DBG_1,
		"waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
	DC395x_LOCK_IO(acb->scsi_host, flags);
	waiting_process_next(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
/* Get the DCB for a given ID/LUN combination.
 * NOTE(review): no bounds checking -- callers must have validated id/lun
 * against max_id/max_lun first (as dc395x_queue_command_lck does). */
static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
{
	return acb->children[id][lun];
}
/* Send SCSI Request Block (srb) to adapter (acb) */
static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;

	/*
	 * Queue instead of starting immediately when the device already
	 * has its maximum number of commands outstanding, the bus is
	 * busy, or a reset is being handled.
	 */
	if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
	    acb->active_dcb ||
	    (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
		srb_waiting_append(dcb, srb);
		waiting_process_next(acb);
		return;
	}

	if (!start_scsi(acb, dcb, srb))
		srb_going_append(dcb, srb);
	else {
		/* bus was busy after all: retry this one first, via the timer */
		srb_waiting_insert(dcb, srb);
		waiting_set_timer(acb, HZ / 50);
	}
}
  822. /* Prepare SRB for being sent to Device DCB w/ command *cmd */
  823. static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
  824. struct ScsiReqBlk *srb)
  825. {
  826. int nseg;
  827. enum dma_data_direction dir = cmd->sc_data_direction;
  828. dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
  829. cmd, dcb->target_id, dcb->target_lun);
  830. srb->dcb = dcb;
  831. srb->cmd = cmd;
  832. srb->sg_count = 0;
  833. srb->total_xfer_length = 0;
  834. srb->sg_bus_addr = 0;
  835. srb->sg_index = 0;
  836. srb->adapter_status = 0;
  837. srb->target_status = 0;
  838. srb->msg_count = 0;
  839. srb->status = 0;
  840. srb->flag = 0;
  841. srb->state = 0;
  842. srb->retry_count = 0;
  843. srb->tag_number = TAG_NONE;
  844. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  845. srb->end_message = 0;
  846. nseg = scsi_dma_map(cmd);
  847. BUG_ON(nseg < 0);
  848. if (dir == PCI_DMA_NONE || !nseg) {
  849. dprintkdbg(DBG_0,
  850. "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
  851. cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
  852. srb->segment_x[0].address);
  853. } else {
  854. int i;
  855. u32 reqlen = scsi_bufflen(cmd);
  856. struct scatterlist *sg;
  857. struct SGentry *sgp = srb->segment_x;
  858. srb->sg_count = nseg;
  859. dprintkdbg(DBG_0,
  860. "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
  861. reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
  862. srb->sg_count);
  863. scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
  864. u32 busaddr = (u32)sg_dma_address(sg);
  865. u32 seglen = (u32)sg->length;
  866. sgp[i].address = busaddr;
  867. sgp[i].length = seglen;
  868. srb->total_xfer_length += seglen;
  869. }
  870. sgp += srb->sg_count - 1;
  871. /*
  872. * adjust last page if too big as it is allocated
  873. * on even page boundaries
  874. */
  875. if (srb->total_xfer_length > reqlen) {
  876. sgp->length -= (srb->total_xfer_length - reqlen);
  877. srb->total_xfer_length = reqlen;
  878. }
  879. /* Fixup for WIDE padding - make sure length is even */
  880. if (dcb->sync_period & WIDE_SYNC &&
  881. srb->total_xfer_length % 2) {
  882. srb->total_xfer_length++;
  883. sgp->length++;
  884. }
  885. srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
  886. srb->segment_x,
  887. SEGMENTX_LEN,
  888. PCI_DMA_TODEVICE);
  889. dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
  890. srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
  891. }
  892. srb->request_length = srb->total_xfer_length;
  893. }
  894. /**
  895. * dc395x_queue_command - queue scsi command passed from the mid
  896. * layer, invoke 'done' on completion
  897. *
  898. * @cmd: pointer to scsi command object
  899. * @done: function pointer to be invoked on completion
  900. *
  901. * Returns 1 if the adapter (host) is busy, else returns 0. One
  902. * reason for an adapter to be busy is that the number
  903. * of outstanding queued commands is already equal to
  904. * struct Scsi_Host::can_queue .
  905. *
  906. * Required: if struct Scsi_Host::can_queue is ever non-zero
  907. * then this function is required.
  908. *
  909. * Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
  910. * and is expected to be held on return.
  911. *
  912. **/
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
		cmd, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);

	/* Assume BAD_TARGET; will be cleared later */
	cmd->result = DID_BAD_TARGET << 16;

	/* ignore invalid targets */
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun >31) {
		goto complete;
	}

	/* does the specified lun on the specified device exist */
	if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
		dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}

	/* do we have a DCB for the device */
	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		/* should never happen */
		dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
			cmd->device->id, cmd->device->lun);
		goto complete;
	}

	/* set callback and clear result in the command */
	cmd->scsi_done = done;
	cmd->result = 0;

	srb = srb_get_free(acb);
	if (!srb)
	{
		/*
		 * Return 1 since we are unable to queue this command at this
		 * point in time.
		 */
		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
		return 1;
	}

	build_srb(cmd, dcb, srb);

	if (!list_empty(&dcb->srb_waiting_list)) {
		/* append to waiting queue to keep submission order */
		srb_waiting_append(dcb, srb);
		waiting_process_next(acb);
	} else {
		/* process immediately */
		send_srb(acb, srb);
	}
	dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
	return 0;

complete:
	/*
	 * Complete the command immediately, and then return 0 to
	 * indicate that we have handled the command. This is usually
	 * done when the command is for things like non existent
	 * devices.
	 */
	done(cmd);
	return 0;
}
  977. static DEF_SCSI_QCMD(dc395x_queue_command)
  978. /*
  979. * Return the disk geometry for the given SCSI device.
  980. */
  981. static int dc395x_bios_param(struct scsi_device *sdev,
  982. struct block_device *bdev, sector_t capacity, int *info)
  983. {
  984. #ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
  985. int heads, sectors, cylinders;
  986. struct AdapterCtlBlk *acb;
  987. int size = capacity;
  988. dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
  989. acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
  990. heads = 64;
  991. sectors = 32;
  992. cylinders = size / (heads * sectors);
  993. if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
  994. heads = 255;
  995. sectors = 63;
  996. cylinders = size / (heads * sectors);
  997. }
  998. geom[0] = heads;
  999. geom[1] = sectors;
  1000. geom[2] = cylinders;
  1001. return 0;
  1002. #else
  1003. return scsicam_bios_param(bdev, capacity, info);
  1004. #endif
  1005. }
/* Debug helper: dump the current SRB state plus every SCSI/DMA/general
 * chip register and the PCI status word.  @dcb/@srb may be NULL, in
 * which case the currently active ones are used. */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		if (!srb->cmd)
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
				 "cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd,
				srb->cmd->cmnd[0], srb->cmd->device->id,
			       	srb->cmd->device->lun);
		printk("  sglist=%p cnt=%i idx=%i len=%zu\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk("  state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
/* Discard anything left in the SCSI FIFO; @txt names the caller for the
 * optional debug report. */
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
#if debug_enabled(DBG_FIFO)
	u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
	/* log only when data would actually be discarded
	 * (bit 6 apparently flags an empty FIFO -- per the mask below) */
	if (!(fifocnt & 0x40))
		dprintkdbg(DBG_FIFO,
			"clear_fifo: (%i bytes) on phase %02x in %s\n",
			fifocnt & 0x3f, lines, txt);
#endif
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
/* Forget all negotiation results and reload every device's mode and
 * minimum sync period from the eeprom; used after a bus reset. */
static void reset_dev_param(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb;
	struct NvRamType *eeprom = &acb->eeprom;
	dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);

	list_for_each_entry(dcb, &acb->dcb_list, list) {
		u8 period_index;

		/* sync/wide negotiation must be redone after a reset */
		dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
		dcb->sync_period = 0;
		dcb->sync_offset = 0;

		dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
		period_index = eeprom->target[dcb->target_id].period & 0x07;
		dcb->min_nego_period = clock_period[period_index];

		/* wide only if both the device and the card support it */
		if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
		    || !(acb->config & HCC_WIDE_CARD))
			dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
	}
}
  1100. /*
  1101. * perform a hard reset on the SCSI bus
  1102. * @cmd - some command for this host (for fetching hooks)
  1103. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  1104. */
  1105. static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  1106. {
  1107. struct AdapterCtlBlk *acb =
  1108. (struct AdapterCtlBlk *)cmd->device->host->hostdata;
  1109. dprintkl(KERN_INFO,
  1110. "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
  1111. cmd, cmd->device->id, cmd->device->lun, cmd);
  1112. if (timer_pending(&acb->waiting_timer))
  1113. del_timer(&acb->waiting_timer);
  1114. /*
  1115. * disable interrupt
  1116. */
  1117. DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
  1118. DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
  1119. DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
  1120. DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
  1121. reset_scsi_bus(acb);
  1122. udelay(500);
  1123. /* We may be in serious trouble. Wait some seconds */
  1124. acb->scsi_host->last_reset =
  1125. jiffies + 3 * HZ / 2 +
  1126. HZ * acb->eeprom.delay_time;
  1127. /*
  1128. * re-enable interrupt
  1129. */
  1130. /* Clear SCSI FIFO */
  1131. DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
  1132. clear_fifo(acb, "eh_bus_reset");
  1133. /* Delete pending IRQ */
  1134. DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
  1135. set_basic_config(acb);
  1136. reset_dev_param(acb);
  1137. doing_srb_done(acb, DID_RESET, cmd, 0);
  1138. acb->active_dcb = NULL;
  1139. acb->acb_flag = 0; /* RESET_DETECT, RESET_DONE ,RESET_DEV */
  1140. waiting_process_next(acb);
  1141. return SUCCESS;
  1142. }
  1143. static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
  1144. {
  1145. int rc;
  1146. spin_lock_irq(cmd->device->host->host_lock);
  1147. rc = __dc395x_eh_bus_reset(cmd);
  1148. spin_unlock_irq(cmd->device->host->host_lock);
  1149. return rc;
  1150. }
  1151. /*
  1152. * abort an errant SCSI command
  1153. * @cmd - command to be aborted
  1154. * Returns: SUCCESS (0x2002) on success, else FAILED (0x2003).
  1155. */
static int dc395x_eh_abort(struct scsi_cmnd *cmd)
{
	/*
	 * Look into our command queues: If it has not been sent already,
	 * we remove it and return success. Otherwise fail.
	 */
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
		cmd, cmd->device->id, cmd->device->lun, cmd);

	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
		return FAILED;
	}

	/* still waiting: unmap, untag and recycle the srb */
	srb = find_cmd(cmd, &dcb->srb_waiting_list);
	if (srb) {
		srb_waiting_remove(dcb, srb);
		pci_unmap_srb_sense(acb, srb);
		pci_unmap_srb(acb, srb);
		free_tag(dcb, srb);
		srb_free_insert(acb, srb);
		dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
		cmd->result = DID_ABORT << 16;
		return SUCCESS;
	}

	/* already active on the bus: cannot be aborted from here */
	srb = find_cmd(cmd, &dcb->srb_going_list);
	if (srb) {
		dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
		/* XXX: Should abort the command here */
	} else {
		dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
	}
	return FAILED;
}
  1193. /* SDTR */
  1194. static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1195. struct ScsiReqBlk *srb)
  1196. {
  1197. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1198. if (srb->msg_count > 1) {
  1199. dprintkl(KERN_INFO,
  1200. "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1201. srb->msg_count, srb->msgout_buf[0],
  1202. srb->msgout_buf[1]);
  1203. return;
  1204. }
  1205. if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
  1206. dcb->sync_offset = 0;
  1207. dcb->min_nego_period = 200 >> 2;
  1208. } else if (dcb->sync_offset == 0)
  1209. dcb->sync_offset = SYNC_NEGO_OFFSET;
  1210. *ptr++ = MSG_EXTENDED; /* (01h) */
  1211. *ptr++ = 3; /* length */
  1212. *ptr++ = EXTENDED_SDTR; /* (01h) */
  1213. *ptr++ = dcb->min_nego_period; /* Transfer period (in 4ns) */
  1214. *ptr++ = dcb->sync_offset; /* Transfer period (max. REQ/ACK dist) */
  1215. srb->msg_count += 5;
  1216. srb->state |= SRB_DO_SYNC_NEGO;
  1217. }
  1218. /* WDTR */
  1219. static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  1220. struct ScsiReqBlk *srb)
  1221. {
  1222. u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
  1223. (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
  1224. u8 *ptr = srb->msgout_buf + srb->msg_count;
  1225. if (srb->msg_count > 1) {
  1226. dprintkl(KERN_INFO,
  1227. "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
  1228. srb->msg_count, srb->msgout_buf[0],
  1229. srb->msgout_buf[1]);
  1230. return;
  1231. }
  1232. *ptr++ = MSG_EXTENDED; /* (01h) */
  1233. *ptr++ = 2; /* length */
  1234. *ptr++ = EXTENDED_WDTR; /* (03h) */
  1235. *ptr++ = wide;
  1236. srb->msg_count += 4;
  1237. srb->state |= SRB_DO_WIDE_NEGO;
  1238. }
#if 0
/* Timer to work around chip flaw: When selecting and the bus is
 * busy, we sometimes miss a Selection timeout IRQ */
void selection_timeout_missed(unsigned long ptr);
/* Sets the timer to wake us up */
static void selto_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	/* Do not let the timer expire inside the post-reset quiet period */
	if (time_before
	    (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->scsi_host->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}

/* Timer callback: emulate the missed selection-timeout IRQ by forcing
 * a disconnect of the currently active command */
void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
  1274. static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
  1275. struct ScsiReqBlk* srb)
  1276. {
  1277. u16 s_stat2, return_code;
  1278. u8 s_stat, scsicommand, i, identify_message;
  1279. u8 *ptr;
  1280. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
  1281. dcb->target_id, dcb->target_lun, srb);
  1282. srb->tag_number = TAG_NONE; /* acb->tag_max_num: had error read in eeprom */
  1283. s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
  1284. s_stat2 = 0;
  1285. s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
  1286. #if 1
  1287. if (s_stat & 0x20 /* s_stat2 & 0x02000 */ ) {
  1288. dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
  1289. s_stat, s_stat2);
  1290. /*
  1291. * Try anyway?
  1292. *
  1293. * We could, BUT: Sometimes the TRM_S1040 misses to produce a Selection
  1294. * Timeout, a Disconnect or a Reselection IRQ, so we would be screwed!
  1295. * (This is likely to be a bug in the hardware. Obviously, most people
  1296. * only have one initiator per SCSI bus.)
  1297. * Instead let this fail and have the timer make sure the command is
  1298. * tried again after a short time
  1299. */
  1300. /*selto_timer (acb); */
  1301. return 1;
  1302. }
  1303. #endif
  1304. if (acb->active_dcb) {
  1305. dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
  1306. "command while another command (0x%p) is active.",
  1307. srb->cmd,
  1308. acb->active_dcb->active_srb ?
  1309. acb->active_dcb->active_srb->cmd : 0);
  1310. return 1;
  1311. }
  1312. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1313. dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
  1314. return 1;
  1315. }
  1316. /* Allow starting of SCSI commands half a second before we allow the mid-level
  1317. * to queue them again after a reset */
  1318. if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) {
  1319. dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
  1320. return 1;
  1321. }
  1322. /* Flush FIFO */
  1323. clear_fifo(acb, "start_scsi");
  1324. DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
  1325. DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
  1326. DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
  1327. DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
  1328. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1329. identify_message = dcb->identify_msg;
  1330. /*DC395x_TRM_write8(TRM_S1040_SCSI_IDMSG, identify_message); */
  1331. /* Don't allow disconnection for AUTO_REQSENSE: Cont.All.Cond.! */
  1332. if (srb->flag & AUTO_REQSENSE)
  1333. identify_message &= 0xBF;
  1334. if (((srb->cmd->cmnd[0] == INQUIRY)
  1335. || (srb->cmd->cmnd[0] == REQUEST_SENSE)
  1336. || (srb->flag & AUTO_REQSENSE))
  1337. && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
  1338. && !(dcb->sync_mode & WIDE_NEGO_DONE))
  1339. || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
  1340. && !(dcb->sync_mode & SYNC_NEGO_DONE)))
  1341. && (dcb->target_lun == 0)) {
  1342. srb->msgout_buf[0] = identify_message;
  1343. srb->msg_count = 1;
  1344. scsicommand = SCMD_SEL_ATNSTOP;
  1345. srb->state = SRB_MSGOUT;
  1346. #ifndef SYNC_FIRST
  1347. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1348. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1349. build_wdtr(acb, dcb, srb);
  1350. goto no_cmd;
  1351. }
  1352. #endif
  1353. if (dcb->sync_mode & SYNC_NEGO_ENABLE
  1354. && dcb->inquiry7 & SCSI_INQ_SYNC) {
  1355. build_sdtr(acb, dcb, srb);
  1356. goto no_cmd;
  1357. }
  1358. if (dcb->sync_mode & WIDE_NEGO_ENABLE
  1359. && dcb->inquiry7 & SCSI_INQ_WBUS16) {
  1360. build_wdtr(acb, dcb, srb);
  1361. goto no_cmd;
  1362. }
  1363. srb->msg_count = 0;
  1364. }
  1365. /* Send identify message */
  1366. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
  1367. scsicommand = SCMD_SEL_ATN;
  1368. srb->state = SRB_START_;
  1369. #ifndef DC395x_NO_TAGQ
  1370. if ((dcb->sync_mode & EN_TAG_QUEUEING)
  1371. && (identify_message & 0xC0)) {
  1372. /* Send Tag message */
  1373. u32 tag_mask = 1;
  1374. u8 tag_number = 0;
  1375. while (tag_mask & dcb->tag_mask
  1376. && tag_number < dcb->max_command) {
  1377. tag_mask = tag_mask << 1;
  1378. tag_number++;
  1379. }
  1380. if (tag_number >= dcb->max_command) {
  1381. dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
  1382. "Out of tags target=<%02i-%i>)\n",
  1383. srb->cmd, srb->cmd->device->id,
  1384. srb->cmd->device->lun);
  1385. srb->state = SRB_READY;
  1386. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1387. DO_HWRESELECT);
  1388. return 1;
  1389. }
  1390. /* Send Tag id */
  1391. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
  1392. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
  1393. dcb->tag_mask |= tag_mask;
  1394. srb->tag_number = tag_number;
  1395. scsicommand = SCMD_SEL_ATN3;
  1396. srb->state = SRB_START_;
  1397. }
  1398. #endif
  1399. /*polling:*/
  1400. /* Send CDB ..command block ......... */
  1401. dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
  1402. srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
  1403. srb->cmd->cmnd[0], srb->tag_number);
  1404. if (srb->flag & AUTO_REQSENSE) {
  1405. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
  1406. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
  1407. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1408. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1409. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
  1410. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1411. } else {
  1412. ptr = (u8 *)srb->cmd->cmnd;
  1413. for (i = 0; i < srb->cmd->cmd_len; i++)
  1414. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
  1415. }
  1416. no_cmd:
  1417. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1418. DO_HWRESELECT | DO_DATALATCH);
  1419. if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
  1420. /*
  1421. * If start_scsi return 1:
  1422. * we caught an interrupt (must be reset or reselection ... )
  1423. * : Let's process it first!
  1424. */
  1425. dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
  1426. srb->cmd, dcb->target_id, dcb->target_lun);
  1427. srb->state = SRB_READY;
  1428. free_tag(dcb, srb);
  1429. srb->msg_count = 0;
  1430. return_code = 1;
  1431. /* This IRQ should NOT get lost, as we did not acknowledge it */
  1432. } else {
  1433. /*
  1434. * If start_scsi returns 0:
  1435. * we know that the SCSI processor is free
  1436. */
  1437. srb->scsi_phase = PH_BUS_FREE; /* initial phase */
  1438. dcb->active_srb = srb;
  1439. acb->active_dcb = dcb;
  1440. return_code = 0;
  1441. /* it's important for atn stop */
  1442. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
  1443. DO_DATALATCH | DO_HWRESELECT);
  1444. /* SCSI command */
  1445. DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
  1446. }
  1447. return return_code;
  1448. }
  1449. #define DC395x_ENABLE_MSGOUT \
  1450. DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
  1451. srb->state |= SRB_MSGOUT
  1452. /* abort command */
  1453. static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
  1454. struct ScsiReqBlk *srb)
  1455. {
  1456. srb->msgout_buf[0] = ABORT;
  1457. srb->msg_count = 1;
  1458. DC395x_ENABLE_MSGOUT;
  1459. srb->state &= ~SRB_MSGIN;
  1460. srb->state |= SRB_MSGOUT;
  1461. }
/**
 * dc395x_handle_interrupt - Handle an interrupt that has been confirmed to
 * have been triggered for this card.
 *
 * @acb: a pointer to the adpter control block
 * @scsi_status: the status return when we checked the card
 **/
static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
		u16 scsi_status)
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	u16 phase;
	u8 scsi_intstatus;
	unsigned long flags;
	void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
			      u16 *);

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* This acknowledges the IRQ */
	scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	if ((scsi_status & 0x2007) == 0x2002)
		dprintkl(KERN_DEBUG,
			"COP after COP completed? %04x\n", scsi_status);
	if (debug_enabled(DBG_KG)) {
		if (scsi_intstatus & INT_SELTIMEOUT)
			dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
	}
	/*dprintkl(KERN_DEBUG, "handle_interrupt: intstatus = 0x%02x ", scsi_intstatus); */

	/* A real interrupt arrived, so the missed-SelTO workaround timer
	 * (if armed) is no longer needed */
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	/* Bus-level events are fully handled here and end the interrupt */
	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		disconnect(acb);	/* bus free interrupt */
		goto out_unlock;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		reselect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SELECT) {
		dprintkl(KERN_INFO, "Host does not support target mode!\n");
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		scsi_reset_detect(acb);
		goto out_unlock;
	}

	/* Bus-service/command-done drive the software phase state machine */
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		dcb = acb->active_dcb;
		if (!dcb) {
			dprintkl(KERN_DEBUG,
				"Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
				scsi_status, scsi_intstatus);
			goto out_unlock;
		}
		srb = dcb->active_srb;
		/* NOTE(review): srb is not NULL-checked before use below;
		 * presumably an active DCB always carries an active SRB at
		 * this point -- verify against the start/reselect paths. */
		if (dcb->flag & ABORT_DEV_) {
			dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
			enable_msgout_abort(acb, srb);
		}

		/* software sequential machine */
		phase = (u16)srb->scsi_phase;

		/*
		 * 62037 or 62137
		 * call dc395x_scsi_phase0[]... "phase entry"
		 * handle every phase before start transfer
		 */
		/* data_out_phase0,	phase:0 */
		/* data_in_phase0,	phase:1 */
		/* command_phase0,	phase:2 */
		/* status_phase0,	phase:3 */
		/* nop0,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop0,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase0,	phase:6 */
		/* msgin_phase0,	phase:7 */
		dc395x_statev = dc395x_scsi_phase0[phase];
		dc395x_statev(acb, srb, &scsi_status);

		/*
		 * if there were any exception occurred scsi_status
		 * will be modify to bus free phase new scsi_status
		 * transfer out from ... previous dc395x_statev
		 */
		srb->scsi_phase = scsi_status & PHASEMASK;
		phase = (u16)scsi_status & PHASEMASK;

		/*
		 * call dc395x_scsi_phase1[]... "phase entry" handle
		 * every phase to do transfer
		 */
		/* data_out_phase1,	phase:0 */
		/* data_in_phase1,	phase:1 */
		/* command_phase1,	phase:2 */
		/* status_phase1,	phase:3 */
		/* nop1,		phase:4 PH_BUS_FREE .. initial phase */
		/* nop1,		phase:5 PH_BUS_FREE .. initial phase */
		/* msgout_phase1,	phase:6 */
		/* msgin_phase1,	phase:7 */
		dc395x_statev = dc395x_scsi_phase1[phase];
		dc395x_statev(acb, srb, &scsi_status);
	}
      out_unlock:
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
/* Top-level IRQ handler: decide whether the interrupt is ours and either
 * dispatch to dc395x_handle_interrupt() or report a DMA engine error. */
static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
{
	struct AdapterCtlBlk *acb = dev_id;
	u16 scsi_status;
	u8 dma_status;
	irqreturn_t handled = IRQ_NONE;

	/*
	 * Check for pending interrupt
	 */
	scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
	dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
	if (scsi_status & SCSIINTERRUPT) {
		/* interrupt pending - let's process it! */
		dc395x_handle_interrupt(acb, scsi_status);
		handled = IRQ_HANDLED;
	}
	else if (dma_status & 0x20) {
		/* Error from the DMA engine */
		dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
		dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
		if (acb->active_dcb) {
			acb->active_dcb-> flag |= ABORT_DEV_;
			if (acb->active_dcb->active_srb)
				enable_msgout_abort(acb, acb->active_dcb->active_srb);
		}
		DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
		dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
		/* NOTE(review): dead store — acb is not used again after this
		 * point; looks like it only silences an unused-variable
		 * warning when the #if 0 recovery path is compiled out. */
		acb = NULL;
#endif
		handled = IRQ_HANDLED;
	}
	return handled;
}
  1598. static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  1599. u16 *pscsi_status)
  1600. {
  1601. dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
  1602. if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
  1603. *pscsi_status = PH_BUS_FREE; /*.. initial phase */
  1604. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
  1605. srb->state &= ~SRB_MSGOUT;
  1606. }
  1607. static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  1608. u16 *pscsi_status)
  1609. {
  1610. u16 i;
  1611. u8 *ptr;
  1612. dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
  1613. clear_fifo(acb, "msgout_phase1");
  1614. if (!(srb->state & SRB_MSGOUT)) {
  1615. srb->state |= SRB_MSGOUT;
  1616. dprintkl(KERN_DEBUG,
  1617. "msgout_phase1: (0x%p) Phase unexpected\n",
  1618. srb->cmd); /* So what ? */
  1619. }
  1620. if (!srb->msg_count) {
  1621. dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
  1622. srb->cmd);
  1623. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
  1624. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
  1625. DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
  1626. return;
  1627. }
  1628. ptr = (u8 *)srb->msgout_buf;
  1629. for (i = 0; i < srb->msg_count; i++)
  1630. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
  1631. srb->msg_count = 0;
  1632. if (srb->msgout_buf[0] == MSG_ABORT)
  1633. srb->state = SRB_ABORT_SENT;
  1634. DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
  1635. }
/* Command phase exit: nothing to account for, just latch the data lines */
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
  1642. static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  1643. u16 *pscsi_status)
  1644. {
  1645. struct DeviceCtlBlk *dcb;
  1646. u8 *ptr;
  1647. u16 i;
  1648. dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
  1649. clear_fifo(acb, "command_phase1");
  1650. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
  1651. if (!(srb->flag & AUTO_REQSENSE)) {
  1652. ptr = (u8 *)srb->cmd->cmnd;
  1653. for (i = 0; i < srb->cmd->cmd_len; i++) {
  1654. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
  1655. ptr++;
  1656. }
  1657. } else {
  1658. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
  1659. dcb = acb->active_dcb;
  1660. /* target id */
  1661. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
  1662. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1663. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1664. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
  1665. DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
  1666. }
  1667. srb->state |= SRB_COMMAND;
  1668. /* it's important for atn stop */
  1669. DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
  1670. /* SCSI command */
  1671. DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
  1672. }
  1673. /*
  1674. * Verify that the remaining space in the hw sg lists is the same as
  1675. * the count of remaining bytes in srb->total_xfer_length
  1676. */
  1677. static void sg_verify_length(struct ScsiReqBlk *srb)
  1678. {
  1679. if (debug_enabled(DBG_SG)) {
  1680. unsigned len = 0;
  1681. unsigned idx = srb->sg_index;
  1682. struct SGentry *psge = srb->segment_x + idx;
  1683. for (; idx < srb->sg_count; psge++, idx++)
  1684. len += psge->length;
  1685. if (len != srb->total_xfer_length)
  1686. dprintkdbg(DBG_SG,
  1687. "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
  1688. srb->total_xfer_length, len);
  1689. }
  1690. }
/*
 * Compute the next Scatter Gather list index and adjust its length
 * and address if necessary
 *
 * @srb: request whose SG bookkeeping to update
 * @left: number of bytes still to be transferred
 */
static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
{
	u8 idx;
	u32 xferred = srb->total_xfer_length - left;	/* bytes transferred */
	struct SGentry *psge = srb->segment_x + srb->sg_index;

	dprintkdbg(DBG_0,
		"sg_update_list: Transferred %i of %i bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
	if (xferred == 0) {
		/* nothing to update since we did not transfer any data */
		return;
	}

	sg_verify_length(srb);
	srb->total_xfer_length = left;	/* update remaining count */
	/* Walk forward, consuming fully-transferred entries; on the first
	 * partially-done entry, adjust it and stop */
	for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
		if (xferred >= psge->length) {
			/* Complete SG entries done */
			xferred -= psge->length;
		} else {
			/* Partial SG entry done */
			psge->length -= xferred;
			psge->address += xferred;
			srb->sg_index = idx;
			/* Push the modified SG entry back to the device,
			 * since the chip reads the list via DMA */
			pci_dma_sync_single_for_device(srb->dcb->
					    acb->dev,
					    srb->sg_bus_addr,
					    SEGMENTX_LEN,
					    PCI_DMA_TODEVICE);
			break;
		}
		psge++;
	}
	sg_verify_length(srb);
}
/*
 * We have transferred a single byte (PIO mode?) and need to update
 * the count of bytes remaining (total_xfer_length) and update the sg
 * entry to either point to next byte in the current sg entry, or of
 * already at the end to point to the start of the next sg entry
 *
 * (thin wrapper: delegates all bookkeeping to sg_update_list)
 */
static void sg_subtract_one(struct ScsiReqBlk *srb)
{
	sg_update_list(srb, srb->total_xfer_length - 1);
}
/*
 * cleanup_after_transfer
 *
 * Makes sure, DMA and SCSI engine are empty, after the transfer has finished
 * KG: Currently called from StatusPhase1 ()
 * Should probably also be called from other places
 * Best might be to call it in DataXXPhase0, if new phase will differ
 */
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/*DC395x_write8 (TRM_S1040_DMA_STATUS, FORCEDMACOMP); */
	/* Direction bit of the last DMA command decides which FIFO is
	 * drained first; note the order is mirrored between the branches */
	if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {	/* read */
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/in");
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	} else {	/* write */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/out");
	}
	/* it's important for atn stop */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
/*
 * Those no of bytes will be transferred w/ PIO through the SCSI FIFO
 * Seems to be needed for unknown reasons; could be a hardware bug :-(
 */
#define DC395x_LASTPIO 4

/*
 * Data-out phase exit: stop the DMA engine and account for any bytes
 * that were pushed into the FIFOs but never reached the device, so the
 * SG list reflects what must still be (re)transmitted.
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;
	dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);

	/*
	 * KG: We need to drain the buffers before we draw any conclusions!
	 * This means telling the DMA to push the rest into SCSI, telling
	 * SCSI to push the rest to the bus.
	 * However, the device might have been the one to stop us (phase
	 * change), and the data in transit just needs to be accounted so
	 * it can be retransmitted.)
	 */
	/*
	 * KG: Stop DMA engine pushing more data into the SCSI FIFO
	 * If we need more data, the DMA SG list will be freshly set up, anyway
	 */
	dprintkdbg(DBG_PIO, "data_out_phase0: "
		"DMA{fifocnt=0x%02x fifostat=0x%02x} "
		"SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
		srb->total_xfer_length);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);

	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			srb->status |= PARITY_ERROR;

		/*
		 * KG: Right, we can't just rely on the SCSI_COUNTER, because this
		 * is the no of bytes it got from the DMA engine not the no it
		 * transferred successfully to the device. (And the difference could
		 * be as much as the FIFO size, I guess ...)
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/*
			 * when data transfer from DMA FIFO to SCSI FIFO
			 * if there was some data left in SCSI FIFO
			 */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			/* FIFO count is in words on a wide bus */
			if (dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;

			dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
				"SCSI{fifocnt=0x%02x cnt=0x%08x} "
				"DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				(dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
				DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
		}
		/*
		 * calculate all the residue data that not yet tranfered
		 * SCSI transfer counter + left in SCSI FIFO data
		 *
		 * .....TRM_S1040_SCSI_COUNTER (24bits)
		 * The counter always decrement by one for every SCSI byte transfer.
		 * .....TRM_S1040_SCSI_FIFOCNT ( 5bits)
		 * The counter is SCSI FIFO offset counter (in units of bytes or! words)
		 */
		if (srb->total_xfer_length > DC395x_LASTPIO)
			d_left_counter +=
			    DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);

		/* Is this a good idea? */
		/*clear_fifo(acb, "DOP1"); */
		/* KG: What is this supposed to be useful for? WIDE padding stuff? */
		if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
		    && scsi_bufflen(srb->cmd) % 2) {
			d_left_counter = 0;
			dprintkl(KERN_INFO,
				"data_out_phase0: Discard 1 byte (0x%02x)\n",
				scsi_status);
		}
		/*
		 * KG: Oops again. Same thinko as above: The SCSI might have been
		 * faster than the DMA engine, so that it ran out of data.
		 * In that case, we have to do just nothing!
		 * But: Why the interrupt: No phase change. No XFERCNT_2_ZERO. Or?
		 */
		/*
		 * KG: This is nonsense: We have been WRITING data to the bus
		 * If the SCSI engine has no bytes left, how should the DMA engine?
		 */
		if (d_left_counter == 0) {
			srb->total_xfer_length = 0;
		} else {
			/*
			 * if transfer not yet complete
			 * there were some data residue in SCSI FIFO or
			 * SCSI transfer counter not empty
			 */
			long oldxferred =
			    srb->total_xfer_length - d_left_counter;
			const int diff =
			    (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
			sg_update_list(srb, d_left_counter);
			/* KG: Most ugly hack! Apparently, this works around a chip bug */
			if ((srb->segment_x[srb->sg_index].length ==
			     diff && scsi_sg_count(srb->cmd))
			    || ((oldxferred & ~PAGE_MASK) ==
				(PAGE_SIZE - diff))
			    ) {
				dprintkl(KERN_INFO, "data_out_phase0: "
					"Work around chip bug (%i)?\n", diff);
				d_left_counter =
				    srb->total_xfer_length - diff;
				sg_update_list(srb, d_left_counter);
				/*srb->total_xfer_length -= diff; */
				/*srb->virt_addr += diff; */
				/*if (srb->cmd->use_sg) */
				/*	srb->sg_index++; */
			}
		}
	}
	/* Phase moved away from data-out: flush engines and latch lines */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
		cleanup_after_transfer(acb, srb);
	}
}
/* Data-out phase entry: flush the FIFO and start the outgoing transfer */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
	clear_fifo(acb, "data_out_phase1");
	/* do prepare before transfer when data out phase */
	data_io_transfer(acb, srb, XFERDATAOUT);
}
  1904. static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
  1905. u16 *pscsi_status)
  1906. {
  1907. u16 scsi_status = *pscsi_status;
  1908. dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
  1909. srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
  1910. /*
  1911. * KG: DataIn is much more tricky than DataOut. When the device is finished
  1912. * and switches to another phase, the SCSI engine should be finished too.
  1913. * But: There might still be bytes left in its FIFO to be fetched by the DMA
  1914. * engine and transferred to memory.
  1915. * We should wait for the FIFOs to be emptied by that (is there any way to
  1916. * enforce this?) and then stop the DMA engine, because it might think, that
  1917. * there are more bytes to follow. Yes, the device might disconnect prior to
  1918. * having all bytes transferred!
  1919. * Also we should make sure that all data from the DMA engine buffer's really
  1920. * made its way to the system memory! Some documentation on this would not
  1921. * seem to be a bad idea, actually.
  1922. */
  1923. if (!(srb->state & SRB_XFERPAD)) {
  1924. u32 d_left_counter;
  1925. unsigned int sc, fc;
  1926. if (scsi_status & PARITYERROR) {
  1927. dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
  1928. "Parity Error\n", srb->cmd);
  1929. srb->status |= PARITY_ERROR;
  1930. }
  1931. /*
  1932. * KG: We should wait for the DMA FIFO to be empty ...
  1933. * but: it would be better to wait first for the SCSI FIFO and then the
  1934. * the DMA FIFO to become empty? How do we know, that the device not already
  1935. * sent data to the FIFO in a MsgIn phase, eg.?
  1936. */
  1937. if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
  1938. #if 0
  1939. int ctr = 6000000;
  1940. dprintkl(KERN_DEBUG,
  1941. "DIP0: Wait for DMA FIFO to flush ...\n");
  1942. /*DC395x_write8 (TRM_S1040_DMA_CONTROL, STOPDMAXFER); */
  1943. /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 7); */
  1944. /*DC395x_write8 (TRM_S1040_SCSI_COMMAND, SCMD_DMA_IN); */
  1945. while (!
  1946. (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
  1947. 0x80) && --ctr);
  1948. if (ctr < 6000000 - 1)
  1949. dprintkl(KERN_DEBUG
  1950. "DIP0: Had to wait for DMA ...\n");
  1951. if (!ctr)
  1952. dprintkl(KERN_ERR,
  1953. "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
  1954. /*DC395x_write32 (TRM_S1040_SCSI_COUNTER, 0); */
  1955. #endif
  1956. dprintkdbg(DBG_KG, "data_in_phase0: "
  1957. "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
  1958. DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
  1959. DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
  1960. }
  1961. /* Now: Check remainig data: The SCSI counters should tell us ... */
  1962. sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
  1963. fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
  1964. d_left_counter = sc + ((fc & 0x1f)
  1965. << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
  1966. 0));
  1967. dprintkdbg(DBG_KG, "data_in_phase0: "
  1968. "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
  1969. "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
  1970. "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
  1971. fc,
  1972. (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
  1973. sc,
  1974. fc,
  1975. DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
  1976. DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
  1977. srb->total_xfer_length, d_left_counter);
  1978. #if DC395x_LASTPIO
  1979. /* KG: Less than or equal to 4 bytes can not be transferred via DMA, it seems. */
  1980. if (d_left_counter
  1981. && srb->total_xfer_length <= DC395x_LASTPIO) {
  1982. size_t left_io = srb->total_xfer_length;
  1983. /*u32 addr = (srb->segment_x[srb->sg_index].address); */
  1984. /*sg_update_list (srb, d_left_counter); */
  1985. dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
  1986. "for remaining %i bytes:",
  1987. fc & 0x1f,
  1988. (srb->dcb->sync_period & WIDE_SYNC) ?
  1989. "words" : "bytes",
  1990. srb->total_xfer_length);
  1991. if (srb->dcb->sync_period & WIDE_SYNC)
  1992. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
  1993. CFG2_WIDEFIFO);
  1994. while (left_io) {
  1995. unsigned char *virt, *base = NULL;
  1996. unsigned long flags = 0;
  1997. size_t len = left_io;
  1998. size_t offset = srb->request_length - left_io;
  1999. local_irq_save(flags);
  2000. /* Assumption: it's inside one page as it's at most 4 bytes and
  2001. I just assume it's on a 4-byte boundary */
  2002. base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
  2003. srb->sg_count, &offset, &len);
  2004. virt = base + offset;
  2005. left_io -= len;
  2006. while (len) {
  2007. u8 byte;
  2008. byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
  2009. *virt++ = byte;
  2010. if (debug_enabled(DBG_PIO))
  2011. printk(" %02x", byte);
  2012. d_left_counter--;
  2013. sg_subtract_one(srb);
  2014. len--;
  2015. fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
  2016. if (fc == 0x40) {
  2017. left_io = 0;
  2018. break;
  2019. }
  2020. }
  2021. WARN_ON((fc != 0x40) == !d_left_counter);
  2022. if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
  2023. /* Read the last byte ... */
  2024. if (srb->total_xfer_length > 0) {
  2025. u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
  2026. *virt++ = byte;
  2027. srb->total_xfer_length--;
  2028. if (debug_enabled(DBG_PIO))
  2029. printk(" %02x", byte);
  2030. }
  2031. DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
  2032. }
  2033. scsi_kunmap_atomic_sg(base);
  2034. local_irq_restore(flags);
  2035. }
  2036. /*printk(" %08x", *(u32*)(bus_to_virt (addr))); */
  2037. /*srb->total_xfer_length = 0; */
  2038. if (debug_enabled(DBG_PIO))
  2039. printk("\n");
  2040. }
  2041. #endif /* DC395x_LASTPIO */
  2042. #if 0
  2043. /*
  2044. * KG: This was in DATAOUT. Does it also belong here?
  2045. * Nobody seems to know what counter and fifo_cnt count exactly ...
  2046. */
  2047. if (!(scsi_status & SCSIXFERDONE)) {
  2048. /*
  2049. * when data transfer from DMA FIFO to SCSI FIFO
  2050. * if there was some data left in SCSI FIFO
  2051. */
  2052. d_left_counter =
  2053. (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
  2054. 0x1F);
  2055. if (srb->dcb->sync_period & WIDE_SYNC)
  2056. d_left_counter <<= 1;
  2057. /*
  2058. * if WIDE scsi SCSI FIFOCNT unit is word !!!
  2059. * so need to *= 2
  2060. * KG: Seems to be correct ...
  2061. */
  2062. }
  2063. #endif
  2064. /* KG: This should not be needed any more! */
  2065. if (d_left_counter == 0
  2066. || (scsi_status & SCSIXFERCNT_2_ZERO)) {
  2067. #if 0
  2068. int ctr = 6000000;
  2069. u8 TempDMAstatus;
  2070. do {
  2071. TempDMAstatus =
  2072. DC395x_read8(acb, TRM_S1040_DMA_STATUS);
  2073. } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
  2074. if (!ctr)
  2075. dprintkl(KERN_ERR,
  2076. "Deadlock in DataInPhase0 waiting for DMA!!\n");
  2077. srb->total_xfer_length = 0;
  2078. #endif
  2079. srb->total_xfer_length = d_left_counter;
  2080. } else { /* phase changed */
  2081. /*
  2082. * parsing the case:
  2083. * when a transfer not yet complete
  2084. * but be disconnected by target
  2085. * if transfer not yet complete
  2086. * there were some data residue in SCSI FIFO or
  2087. * SCSI transfer counter not empty
  2088. */
  2089. sg_update_list(srb, d_left_counter);
  2090. }
  2091. }
  2092. /* KG: The target may decide to disconnect: Empty FIFO before! */
  2093. if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
  2094. cleanup_after_transfer(acb, srb);
  2095. }
  2096. }
/*
 * DATA IN phase entry point (called from the phase dispatch): program
 * the controller for a host-bound transfer via data_io_transfer().
 */
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
	data_io_transfer(acb, srb, XFERDATAIN);
}
/*
 * data_io_transfer - program the TRM-S1040 for the next chunk of a
 * DATA IN / DATA OUT phase.
 *
 * @acb:    adapter the transfer runs on
 * @srb:    request being serviced; sg_index/total_xfer_length track
 *          the progress made so far
 * @io_dir: DMA command word; DMACMD_DIR set means read (data in)
 *
 * Three strategies, selected by the remaining transfer length:
 *  - more than DC395x_LASTPIO bytes: DMA, with or without S/G list
 *  - 1..DC395x_LASTPIO bytes: PIO through the SCSI FIFO
 *  - 0 bytes left but target still in a data phase: transfer pad
 */
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	dprintkdbg(DBG_0,
		"data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
		srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
		((io_dir & DMACMD_DIR) ? 'r' : 'w'),
		srb->total_xfer_length, srb->sg_index, srb->sg_count);
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
	if (srb->sg_index >= srb->sg_count) {
		/* can't happen? out of bounds error */
		return;
	}
	if (srb->total_xfer_length > DC395x_LASTPIO) {
		u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
		/*
		 * KG: What should we do: Use SCSI Cmd 0x90/0x92?
		 * Maybe, even ABORTXFER would be appropriate
		 */
		if (dma_status & XFERPENDING) {
			/* previous DMA still in flight - clear its FIFO */
			dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
				"Expect trouble!\n");
			dump_register_info(acb, dcb, srb);
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		}
		/* clear_fifo(acb, "IO"); */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
		if (scsi_sg_count(srb->cmd)) {	/* with S/G */
			/* point the DMA engine at the remaining S/G entries */
			io_dir |= DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				srb->sg_bus_addr +
				sizeof(struct SGentry) *
				srb->sg_index);
			/* load how many bytes in the sg list table */
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				((u32)(srb->sg_count -
				srb->sg_index) << 3));
		} else {	/* without S/G */
			io_dir &= ~DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				srb->segment_x[0].address);
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				srb->segment_x[0].length);
		}
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* NOTE(review): the SCSI and DMA commands are issued in
		 * opposite order for reads vs. writes - keep this order. */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_DMA_IN);
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
		} else {
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_DMA_OUT);
		}
	}
#if DC395x_LASTPIO
	else if (srb->total_xfer_length > 0) {	/* The last four bytes: Do PIO */
		/*
		 * load what physical address of Scatter/Gather list table
		 * want to be transfer
		 */
		srb->state |= SRB_DATA_XFER;
		/* load total transfer length (24bits) max value 16Mbyte */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		if (io_dir & DMACMD_DIR) {	/* read */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_FIFO_IN);
		} else {	/* write */
			int ln = srb->total_xfer_length;
			size_t left_io = srb->total_xfer_length;
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					CFG2_WIDEFIFO);
			/* push the remaining bytes into the SCSI FIFO,
			 * kmapping each S/G fragment in turn */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				size_t offset = srb->request_length - left_io;
				local_irq_save(flags);
				/* Again, max 4 bytes */
				base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
					srb->sg_count, &offset, &len);
				virt = base + offset;
				left_io -= len;
				while (len--) {
					if (debug_enabled(DBG_PIO))
						printk(" %02x", *virt);
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
					sg_subtract_one(srb);
				}
				scsi_kunmap_atomic_sg(base);
				local_irq_restore(flags);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
				/* wide FIFO takes 16-bit words: pad an odd
				 * byte count with a trailing zero byte */
				if (ln % 2) {
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
					if (debug_enabled(DBG_PIO))
						printk(" |00");
				}
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}
			/*DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, ln); */
			if (debug_enabled(DBG_PIO))
				printk("\n");
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				SCMD_FIFO_OUT);
		}
	}
#endif				/* DC395x_LASTPIO */
	else {		/* xfer pad */
		/* nothing left to transfer but the target is still in a
		 * data phase: feed/drain pad bytes, record over/under-run */
		u8 data = 0, data2 = 0;
		if (srb->sg_count) {
			srb->adapter_status = H_OVER_UNDER_RUN;
			srb->status |= OVER_RUN;
		}
		/*
		 * KG: despite the fact that we are using 16 bits I/O ops
		 * the SCSI FIFO is only 8 bits according to the docs
		 * (we can set bit 1 in 0x8f to serialize FIFO access ...)
		 */
		if (dcb->sync_period & WIDE_SYNC) {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				CFG2_WIDEFIFO);
			if (io_dir & DMACMD_DIR) {
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			} else {
				/* Danger, Robinson: If you find KGs
				 * scattered over the wide disk, the driver
				 * or chip is to blame :-( */
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
			}
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
		} else {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
			/* Danger, Robinson: If you find a collection of Ks on your disk
			 * something broke :-( */
			if (io_dir & DMACMD_DIR)
				data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			else
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
		}
		srb->state |= SRB_XFERPAD;
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		/* SCSI command */
		bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
	}
}
/*
 * STATUS phase (part 0): the chip has latched the status byte and the
 * following message byte into the SCSI FIFO; fetch both, mark the
 * request completed and tell the chip to accept the message.
 */
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
	srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);	/* get message */
	srb->state = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;	/*.. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * STATUS phase (part 1): arm the chip to fetch the status byte and the
 * following message (SCMD_COMP - presumably "complete sequence";
 * confirm against the TRM-S1040 datasheet).
 */
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
	srb->state = SRB_STATUS;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
}
  2289. /* Check if the message is complete */
  2290. static inline u8 msgin_completed(u8 * msgbuf, u32 len)
  2291. {
  2292. if (*msgbuf == EXTENDED_MESSAGE) {
  2293. if (len < 2)
  2294. return 0;
  2295. if (len < msgbuf[1] + 2)
  2296. return 0;
  2297. } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f) /* two byte messages */
  2298. if (len < 2)
  2299. return 0;
  2300. return 1;
  2301. }
  2302. /* reject_msg */
  2303. static inline void msgin_reject(struct AdapterCtlBlk *acb,
  2304. struct ScsiReqBlk *srb)
  2305. {
  2306. srb->msgout_buf[0] = MESSAGE_REJECT;
  2307. srb->msg_count = 1;
  2308. DC395x_ENABLE_MSGOUT;
  2309. srb->state &= ~SRB_MSGIN;
  2310. srb->state |= SRB_MSGOUT;
  2311. dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
  2312. srb->msgin_buf[0],
  2313. srb->dcb->target_id, srb->dcb->target_lun);
  2314. }
  2315. static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
  2316. struct DeviceCtlBlk *dcb, u8 tag)
  2317. {
  2318. struct ScsiReqBlk *srb = NULL;
  2319. struct ScsiReqBlk *i;
  2320. dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
  2321. srb->cmd, tag, srb);
  2322. if (!(dcb->tag_mask & (1 << tag)))
  2323. dprintkl(KERN_DEBUG,
  2324. "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
  2325. dcb->tag_mask, tag);
  2326. if (list_empty(&dcb->srb_going_list))
  2327. goto mingx0;
  2328. list_for_each_entry(i, &dcb->srb_going_list, list) {
  2329. if (i->tag_number == tag) {
  2330. srb = i;
  2331. break;
  2332. }
  2333. }
  2334. if (!srb)
  2335. goto mingx0;
  2336. dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
  2337. srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
  2338. if (dcb->flag & ABORT_DEV_) {
  2339. /*srb->state = SRB_ABORT_SENT; */
  2340. enable_msgout_abort(acb, srb);
  2341. }
  2342. if (!(srb->state & SRB_DISCONNECT))
  2343. goto mingx0;
  2344. memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
  2345. srb->state |= dcb->active_srb->state;
  2346. srb->state |= SRB_DATA_XFER;
  2347. dcb->active_srb = srb;
  2348. /* How can we make the DORS happy? */
  2349. return srb;
  2350. mingx0:
  2351. srb = acb->tmp_srb;
  2352. srb->state = SRB_UNEXPECT_RESEL;
  2353. dcb->active_srb = srb;
  2354. srb->msgout_buf[0] = MSG_ABORT_TAG;
  2355. srb->msg_count = 1;
  2356. DC395x_ENABLE_MSGOUT;
  2357. dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
  2358. return srb;
  2359. }
/*
 * Re-program target id, sync period and offset into the chip after a
 * (re)negotiation, then propagate the rate to the device's siblings.
 */
static inline void reprogram_regs(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	set_xfer_rate(acb, dcb);
}
/*
 * set async transfer mode: the target rejected our SDTR, so fall back
 * to asynchronous transfers; if wide nego is enabled but not done yet,
 * still try WDTR afterwards.
 */
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);
	dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
	dcb->sync_mode |= SYNC_NEGO_DONE;
	/*dcb->sync_period &= 0; */
	dcb->sync_offset = 0;
	dcb->min_nego_period = 200 >> 2;	/* 200ns <=> 5 MHz */
	srb->state &= ~SRB_DO_SYNC_NEGO;
	reprogram_regs(acb, dcb);
	/* wide nego still outstanding? request it now */
	if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
	    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
		build_wdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
	}
}
/*
 * set sync transfer mode: evaluate the target's SDTR message
 * (msgin_buf[3] = period in 4ns units, msgin_buf[4] = offset), clamp
 * both to what we support, program the chip, and - if the target
 * initiated the negotiation - reply with a corrected SDTR.
 */
static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	int fact;
	dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
		"(%02i.%01i MHz) Offset %i\n",
		dcb->target_id, srb->msgin_buf[3] << 2,
		(250 / srb->msgin_buf[3]),
		((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
		srb->msgin_buf[4]);
	/* clamp the offered offset to the chip maximum of 15 */
	if (srb->msgin_buf[4] > 15)
		srb->msgin_buf[4] = 15;
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
		dcb->sync_offset = 0;
	else if (dcb->sync_offset == 0)
		dcb->sync_offset = srb->msgin_buf[4];
	if (srb->msgin_buf[4] > dcb->sync_offset)
		srb->msgin_buf[4] = dcb->sync_offset;
	else
		dcb->sync_offset = srb->msgin_buf[4];
	/* find the fastest clock index satisfying both the target's
	 * period and our own minimum negotiation period */
	bval = 0;
	while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
			    || dcb->min_nego_period >
			    clock_period[bval]))
		bval++;
	if (srb->msgin_buf[3] < clock_period[bval])
		dprintkl(KERN_INFO,
			"msgin_set_sync: Increase sync nego period to %ins\n",
			clock_period[bval] << 2);
	srb->msgin_buf[3] = clock_period[bval];
	dcb->sync_period &= 0xf0;
	dcb->sync_period |= ALT_SYNC | bval;
	dcb->min_nego_period = srb->msgin_buf[3];
	/* wide transfers move two bytes per cycle (factor 500 vs 250) */
	if (dcb->sync_period & WIDE_SYNC)
		fact = 500;
	else
		fact = 250;
	dprintkl(KERN_INFO,
		"Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
		dcb->target_id, (fact == 500) ? "Wide16" : "",
		dcb->min_nego_period << 2, dcb->sync_offset,
		(fact / dcb->min_nego_period),
		((fact % dcb->min_nego_period) * 10 +
		dcb->min_nego_period / 2) / dcb->min_nego_period);
	if (!(srb->state & SRB_DO_SYNC_NEGO)) {
		/* Reply with corrected SDTR Message */
		dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
			srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
		memcpy(srb->msgout_buf, srb->msgin_buf, 5);
		srb->msg_count = 5;
		DC395x_ENABLE_MSGOUT;
		dcb->sync_mode |= SYNC_NEGO_DONE;
	} else {
		/* we initiated; maybe follow up with a wide negotiation */
		if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
		    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
			build_wdtr(acb, dcb, srb);
			DC395x_ENABLE_MSGOUT;
			dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
		}
	}
	srb->state &= ~SRB_DO_SYNC_NEGO;
	dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
	reprogram_regs(acb, dcb);
}
/*
 * The target rejected our WDTR: record narrow transfers as negotiated
 * and, if sync nego is enabled but not done, kick off SDTR.
 */
static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
	dcb->sync_period &= ~WIDE_SYNC;
	dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
	dcb->sync_mode |= WIDE_NEGO_DONE;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
	}
}
/*
 * Evaluate a WDTR message: clamp the requested width (msgin_buf[3]:
 * 0 = 8 bit, 1 = 16 bit) to what device config and adapter support,
 * echo the clamped WDTR if the target initiated the negotiation,
 * program the chip, and possibly follow up with SDTR.
 */
static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
		   && acb->config & HCC_WIDE_CARD) ? 1 : 0;
	dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
	if (srb->msgin_buf[3] > wide)
		srb->msgin_buf[3] = wide;
	/* Completed */
	if (!(srb->state & SRB_DO_WIDE_NEGO)) {
		/* target initiated: echo the (clamped) WDTR back */
		dprintkl(KERN_DEBUG,
			"msgin_set_wide: Wide nego initiated <%02i>\n",
			dcb->target_id);
		memcpy(srb->msgout_buf, srb->msgin_buf, 4);
		srb->msg_count = 4;
		srb->state |= SRB_DO_WIDE_NEGO;
		DC395x_ENABLE_MSGOUT;
	}
	dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
	if (srb->msgin_buf[3] > 0)
		dcb->sync_period |= WIDE_SYNC;
	else
		dcb->sync_period &= ~WIDE_SYNC;
	srb->state &= ~SRB_DO_WIDE_NEGO;
	/*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
	dprintkdbg(DBG_1,
		"msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
		(8 << srb->msgin_buf[3]), dcb->target_id);
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
	    && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
		build_sdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
	}
}
/*
 * extended message codes:
 *
 * code		description
 *
 * 00h		MODIFY DATA POINTER
 * 01h		SYNCHRONOUS DATA TRANSFER REQUEST
 * 02h		Reserved
 * 03h		WIDE DATA TRANSFER REQUEST
 * 04h - 7Fh	Reserved
 * 80h - FFh	Vendor specific
 */
/*
 * MESSAGE IN phase (part 0): collect one message byte per interrupt
 * into msgin_buf; once the message is complete, act on it and then
 * acknowledge it so the target can continue.
 * Note: for queue tag messages srb is rebound to the SRB the tag names.
 */
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
	srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
		/* Now eval the msg */
		switch (srb->msgin_buf[0]) {
		case DISCONNECT:
			srb->state = SRB_DISCONNECT;
			break;
		case SIMPLE_QUEUE_TAG:
		case HEAD_OF_QUEUE_TAG:
		case ORDERED_QUEUE_TAG:
			/* rebind to the disconnected SRB this tag names */
			srb =
			    msgin_qtag(acb, dcb,
				       srb->msgin_buf[1]);
			break;
		case MESSAGE_REJECT:
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_CLRATN | DO_DATALATCH);
			/* A sync nego message was rejected ! */
			if (srb->state & SRB_DO_SYNC_NEGO) {
				msgin_set_async(acb, srb);
				break;
			}
			/* A wide nego message was rejected ! */
			if (srb->state & SRB_DO_WIDE_NEGO) {
				msgin_set_nowide(acb, srb);
				break;
			}
			enable_msgout_abort(acb, srb);
			/*srb->state |= SRB_ABORT_SENT */
			break;
		case EXTENDED_MESSAGE:
			/* SDTR */
			if (srb->msgin_buf[1] == 3
			    && srb->msgin_buf[2] == EXTENDED_SDTR) {
				msgin_set_sync(acb, srb);
				break;
			}
			/* WDTR */
			if (srb->msgin_buf[1] == 2
			    && srb->msgin_buf[2] == EXTENDED_WDTR
			    && srb->msgin_buf[3] <= 2) {	/* sanity check ... */
				msgin_set_wide(acb, srb);
				break;
			}
			msgin_reject(acb, srb);
			break;
		case MSG_IGNOREWIDE:
			/* Discard wide residual */
			dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
			break;
		case COMMAND_COMPLETE:
			/* nothing has to be done */
			break;
		case SAVE_POINTERS:
			/*
			 * SAVE POINTER may be ignored as we have the struct
			 * ScsiReqBlk* associated with the scsi command.
			 */
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"SAVE POINTER rem=%i Ignore\n",
				srb->cmd, srb->total_xfer_length);
			break;
		case RESTORE_POINTERS:
			dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
			break;
		case ABORT:
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"<%02i-%i> ABORT msg\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun);
			dcb->flag |= ABORT_DEV_;
			enable_msgout_abort(acb, srb);
			break;
		default:
			/* reject unknown messages */
			if (srb->msgin_buf[0] & IDENTIFY_BASE) {
				dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
				srb->msg_count = 1;
				srb->msgout_buf[0] = dcb->identify_msg;
				DC395x_ENABLE_MSGOUT;
				srb->state |= SRB_MSGOUT;
				/*break; */
			}
			msgin_reject(acb, srb);
		}
		/* Clear counter and MsgIn state */
		srb->state &= ~SRB_MSGIN;
		acb->msg_len = 0;
	}
	*pscsi_status = PH_BUS_FREE;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important ... you know! */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * MESSAGE IN phase (part 1): arm the chip to receive a single message
 * byte into the (cleared) SCSI FIFO.
 */
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
	clear_fifo(acb, "msgin_phase1");
	DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
	if (!(srb->state & SRB_MSGIN)) {
		/* entering MSG IN ends a disconnect in progress */
		srb->state &= ~SRB_DISCONNECT;
		srb->state |= SRB_MSGIN;
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
}
/* Intentionally empty phase handler (presumably a placeholder entry in
 * the phase dispatch tables - confirm against the table definitions). */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
/* Intentionally empty phase handler, same purpose as nop0. */
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
  2639. static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
  2640. {
  2641. struct DeviceCtlBlk *i;
  2642. /* set all lun device's period, offset */
  2643. if (dcb->identify_msg & 0x07)
  2644. return;
  2645. if (acb->scan_devices) {
  2646. current_sync_offset = dcb->sync_offset;
  2647. return;
  2648. }
  2649. list_for_each_entry(i, &acb->dcb_list, list)
  2650. if (i->target_id == dcb->target_id) {
  2651. i->sync_period = dcb->sync_period;
  2652. i->sync_offset = dcb->sync_offset;
  2653. i->sync_mode = dcb->sync_mode;
  2654. i->min_nego_period = dcb->min_nego_period;
  2655. }
  2656. }
/*
 * Handle a bus-free/disconnect interrupt.  Distinguishes the expected
 * target disconnect, command completion, unexpected-reselection
 * fallout, and selection timeouts (which are retried a few times).
 */
static void disconnect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb;
	if (!dcb) {
		/* stray disconnect with no active device */
		dprintkl(KERN_ERR, "disconnect: No such device\n");
		udelay(500);
		/* Suspend queue for a while */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;
		clear_fifo(acb, "disconnectEx");
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
		return;
	}
	srb = dcb->active_srb;
	acb->active_dcb = NULL;
	dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	clear_fifo(acb, "disconnect");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
	if (srb->state & SRB_UNEXPECT_RESEL) {
		dprintkl(KERN_ERR,
			"disconnect: Unexpected reselection <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
		srb->state = 0;
		waiting_process_next(acb);
	} else if (srb->state & SRB_ABORT_SENT) {
		/* our ABORT message went out: finish the command(s) */
		dcb->flag &= ~ABORT_DEV_;
		acb->scsi_host->last_reset = jiffies + HZ / 2 + 1;
		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
		waiting_process_next(acb);
	} else {
		if ((srb->state & (SRB_START_ + SRB_MSGOUT))
		    || !(srb->
			 state & (SRB_DISCONNECT + SRB_COMPLETED))) {
			/*
			 * Selection time out
			 * SRB_START_ || SRB_MSGOUT || (!SRB_DISCONNECT && !SRB_COMPLETED)
			 */
			/* Unexp. Disc / Sel Timeout */
			if (srb->state != SRB_START_
			    && srb->state != SRB_MSGOUT) {
				srb->state = SRB_READY;
				dprintkl(KERN_DEBUG,
					"disconnect: (0x%p) Unexpected\n",
					srb->cmd);
				srb->target_status = SCSI_STAT_SEL_TIMEOUT;
				goto disc1;
			} else {
				/* Normal selection timeout */
				dprintkdbg(DBG_KG, "disconnect: (0x%p) "
					"<%02i-%i> SelTO\n", srb->cmd,
					dcb->target_id, dcb->target_lun);
				if (srb->retry_count++ > DC395x_MAX_RETRIES
				    || acb->scan_devices) {
					srb->target_status =
					    SCSI_STAT_SEL_TIMEOUT;
					goto disc1;
				}
				/* requeue the command and retry shortly */
				free_tag(dcb, srb);
				srb_going_to_waiting_move(dcb, srb);
				dprintkdbg(DBG_KG,
					"disconnect: (0x%p) Retry\n",
					srb->cmd);
				waiting_set_timer(acb, HZ / 20);
			}
		} else if (srb->state & SRB_DISCONNECT) {
			u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
			/*
			 * SRB_DISCONNECT (This is what we expect!)
			 */
			if (bval & 0x40) {
				dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
					" 0x%02x: ACK set! Other controllers?\n",
					bval);
				/* It could come from another initiator, therefore don't do much ! */
			} else
				waiting_process_next(acb);
		} else if (srb->state & SRB_COMPLETED) {
		      disc1:
			/*
			 ** SRB_COMPLETED
			 */
			free_tag(dcb, srb);
			dcb->active_srb = NULL;
			srb->state = SRB_FREE;
			srb_done(acb, dcb, srb);
		}
	}
}
/*
 * Handle a reselection interrupt: identify the reselecting target/LUN,
 * recover from lost arbitration, pick (or fabricate) the SRB to resume
 * and re-program the chip for that device.
 */
static void reselect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb = NULL;
	u16 rsel_tar_lun_id;
	u8 id, lun;
	u8 arblostflag = 0;
	dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
	clear_fifo(acb, "reselect");
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT | DO_DATALATCH); */
	/* Read Reselected Target ID and LUN */
	rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
	if (dcb) {		/* Arbitration lost but Reselection win */
		srb = dcb->active_srb;
		if (!srb) {
			dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
				"but active_srb == NULL\n");
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
			return;
		}
		/* Why the if ? */
		if (!acb->scan_devices) {
			dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
				"Arb lost but Resel win rsel=%i stat=0x%04x\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun, rsel_tar_lun_id,
				DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
			arblostflag = 1;
			/*srb->state |= SRB_DISCONNECT; */
			/* our own selection lost: requeue the interrupted SRB */
			srb->state = SRB_READY;
			free_tag(dcb, srb);
			srb_going_to_waiting_move(dcb, srb);
			waiting_set_timer(acb, HZ / 20);
			/* return; */
		}
	}
	/* Read Reselected Target Id and LUN */
	if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
		dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
			"Got %i!\n", rsel_tar_lun_id);
	id = rsel_tar_lun_id & 0xff;
	lun = (rsel_tar_lun_id >> 8) & 7;
	dcb = find_dcb(acb, id, lun);
	if (!dcb) {
		dprintkl(KERN_ERR, "reselect: From non existent device "
			"<%02i-%i>\n", id, lun);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
		return;
	}
	acb->active_dcb = dcb;
	if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
		dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
			"disconnection? <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
	if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
		/* tagged: the queue tag message to follow picks the SRB */
		srb = acb->tmp_srb;
		dcb->active_srb = srb;
	} else {
		/* There can be only one! */
		srb = dcb->active_srb;
		if (!srb || !(srb->state & SRB_DISCONNECT)) {
			/*
			 * abort command
			 */
			dprintkl(KERN_DEBUG,
				"reselect: w/o disconnected cmds <%02i-%i>\n",
				dcb->target_id, dcb->target_lun);
			srb = acb->tmp_srb;
			srb->state = SRB_UNEXPECT_RESEL;
			dcb->active_srb = srb;
			enable_msgout_abort(acb, srb);
		} else {
			if (dcb->flag & ABORT_DEV_) {
				/*srb->state = SRB_ABORT_SENT; */
				enable_msgout_abort(acb, srb);
			} else
				srb->state = SRB_DATA_XFER;
		}
	}
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	/* Program HA ID, target ID, period and offset */
	dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);	/* host ID */
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);	/* target ID */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);	/* offset */
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);	/* sync period, wide */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);	/* it's important for atn stop */
	/* SCSI command */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
/*
 * tagq_blacklist - decide whether a device is forbidden to use tagged
 * command queueing.
 *
 * @name: pointer into the INQUIRY data at byte 8 (start of the 28-byte
 *        vendor/product/revision strings — see caller disc_tagq_set()).
 *
 * Returns 1 if the device must not use TCQ, 0 otherwise.  With
 * DC395x_NO_TAGQ defined every device is blacklisted; otherwise the
 * table lookup is compiled out (#if 0) and the function always
 * returns 0.
 */
static inline u8 tagq_blacklist(char *name)
{
#ifndef DC395x_NO_TAGQ
#if 0
	/* disabled: would match against a table of known-bad devices */
	u8 i;
	for (i = 0; i < BADDEVCNT; i++)
		if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
			return 1;
#endif
	return 0;
#else
	return 1;
#endif
}
  2853. static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
  2854. {
  2855. /* Check for SCSI format (ANSI and Response data format) */
  2856. if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
  2857. if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
  2858. && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
  2859. /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
  2860. /* ((dcb->dev_type == TYPE_DISK)
  2861. || (dcb->dev_type == TYPE_MOD)) && */
  2862. !tagq_blacklist(((char *)ptr) + 8)) {
  2863. if (dcb->max_command == 1)
  2864. dcb->max_command =
  2865. dcb->acb->tag_max_num;
  2866. dcb->sync_mode |= EN_TAG_QUEUEING;
  2867. /*dcb->tag_mask = 0; */
  2868. } else
  2869. dcb->max_command = 1;
  2870. }
  2871. }
  2872. static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2873. struct ScsiInqData *ptr)
  2874. {
  2875. u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
  2876. dcb->dev_type = bval1;
  2877. /* if (bval1 == TYPE_DISK || bval1 == TYPE_MOD) */
  2878. disc_tagq_set(dcb, ptr);
  2879. }
  2880. /* unmap mapped pci regions from SRB */
  2881. static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
  2882. {
  2883. struct scsi_cmnd *cmd = srb->cmd;
  2884. enum dma_data_direction dir = cmd->sc_data_direction;
  2885. if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
  2886. /* unmap DC395x SG list */
  2887. dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
  2888. srb->sg_bus_addr, SEGMENTX_LEN);
  2889. pci_unmap_single(acb->dev, srb->sg_bus_addr,
  2890. SEGMENTX_LEN,
  2891. PCI_DMA_TODEVICE);
  2892. dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
  2893. scsi_sg_count(cmd), scsi_bufflen(cmd));
  2894. /* unmap the sg segments */
  2895. scsi_dma_unmap(cmd);
  2896. }
  2897. }
  2898. /* unmap mapped pci sense buffer from SRB */
  2899. static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
  2900. struct ScsiReqBlk *srb)
  2901. {
  2902. if (!(srb->flag & AUTO_REQSENSE))
  2903. return;
  2904. /* Unmap sense buffer */
  2905. dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
  2906. srb->segment_x[0].address);
  2907. pci_unmap_single(acb->dev, srb->segment_x[0].address,
  2908. srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
  2909. /* Restore SG stuff */
  2910. srb->total_xfer_length = srb->xferred;
  2911. srb->segment_x[0].address =
  2912. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
  2913. srb->segment_x[0].length =
  2914. srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
  2915. }
  2916. /*
  2917. * Complete execution of a SCSI command
  2918. * Signal completion to the generic SCSI driver
  2919. */
  2920. static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
  2921. struct ScsiReqBlk *srb)
  2922. {
  2923. u8 tempcnt, status;
  2924. struct scsi_cmnd *cmd = srb->cmd;
  2925. enum dma_data_direction dir = cmd->sc_data_direction;
  2926. int ckc_only = 1;
  2927. dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
  2928. srb->cmd->device->id, srb->cmd->device->lun);
  2929. dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
  2930. srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
  2931. scsi_sgtalbe(cmd));
  2932. status = srb->target_status;
  2933. if (srb->flag & AUTO_REQSENSE) {
  2934. dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
  2935. pci_unmap_srb_sense(acb, srb);
  2936. /*
  2937. ** target status..........................
  2938. */
  2939. srb->flag &= ~AUTO_REQSENSE;
  2940. srb->adapter_status = 0;
  2941. srb->target_status = CHECK_CONDITION << 1;
  2942. if (debug_enabled(DBG_1)) {
  2943. switch (cmd->sense_buffer[2] & 0x0f) {
  2944. case NOT_READY:
  2945. dprintkl(KERN_DEBUG,
  2946. "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2947. cmd->cmnd[0], dcb->target_id,
  2948. dcb->target_lun, status, acb->scan_devices);
  2949. break;
  2950. case UNIT_ATTENTION:
  2951. dprintkl(KERN_DEBUG,
  2952. "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2953. cmd->cmnd[0], dcb->target_id,
  2954. dcb->target_lun, status, acb->scan_devices);
  2955. break;
  2956. case ILLEGAL_REQUEST:
  2957. dprintkl(KERN_DEBUG,
  2958. "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2959. cmd->cmnd[0], dcb->target_id,
  2960. dcb->target_lun, status, acb->scan_devices);
  2961. break;
  2962. case MEDIUM_ERROR:
  2963. dprintkl(KERN_DEBUG,
  2964. "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2965. cmd->cmnd[0], dcb->target_id,
  2966. dcb->target_lun, status, acb->scan_devices);
  2967. break;
  2968. case HARDWARE_ERROR:
  2969. dprintkl(KERN_DEBUG,
  2970. "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
  2971. cmd->cmnd[0], dcb->target_id,
  2972. dcb->target_lun, status, acb->scan_devices);
  2973. break;
  2974. }
  2975. if (cmd->sense_buffer[7] >= 6)
  2976. printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
  2977. "(0x%08x 0x%08x)\n",
  2978. cmd->sense_buffer[2], cmd->sense_buffer[12],
  2979. cmd->sense_buffer[13],
  2980. *((unsigned int *)(cmd->sense_buffer + 3)),
  2981. *((unsigned int *)(cmd->sense_buffer + 8)));
  2982. else
  2983. printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
  2984. cmd->sense_buffer[2],
  2985. *((unsigned int *)(cmd->sense_buffer + 3)));
  2986. }
  2987. if (status == (CHECK_CONDITION << 1)) {
  2988. cmd->result = DID_BAD_TARGET << 16;
  2989. goto ckc_e;
  2990. }
  2991. dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
  2992. if (srb->total_xfer_length
  2993. && srb->total_xfer_length >= cmd->underflow)
  2994. cmd->result =
  2995. MK_RES_LNX(DRIVER_SENSE, DID_OK,
  2996. srb->end_message, CHECK_CONDITION);
  2997. /*SET_RES_DID(cmd->result,DID_OK) */
  2998. else
  2999. cmd->result =
  3000. MK_RES_LNX(DRIVER_SENSE, DID_OK,
  3001. srb->end_message, CHECK_CONDITION);
  3002. goto ckc_e;
  3003. }
  3004. /*************************************************************/
  3005. if (status) {
  3006. /*
  3007. * target status..........................
  3008. */
  3009. if (status_byte(status) == CHECK_CONDITION) {
  3010. request_sense(acb, dcb, srb);
  3011. return;
  3012. } else if (status_byte(status) == QUEUE_FULL) {
  3013. tempcnt = (u8)list_size(&dcb->srb_going_list);
  3014. dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
  3015. dcb->target_id, dcb->target_lun, tempcnt);
  3016. if (tempcnt > 1)
  3017. tempcnt--;
  3018. dcb->max_command = tempcnt;
  3019. free_tag(dcb, srb);
  3020. srb_going_to_waiting_move(dcb, srb);
  3021. waiting_set_timer(acb, HZ / 20);
  3022. srb->adapter_status = 0;
  3023. srb->target_status = 0;
  3024. return;
  3025. } else if (status == SCSI_STAT_SEL_TIMEOUT) {
  3026. srb->adapter_status = H_SEL_TIMEOUT;
  3027. srb->target_status = 0;
  3028. cmd->result = DID_NO_CONNECT << 16;
  3029. } else {
  3030. srb->adapter_status = 0;
  3031. SET_RES_DID(cmd->result, DID_ERROR);
  3032. SET_RES_MSG(cmd->result, srb->end_message);
  3033. SET_RES_TARGET(cmd->result, status);
  3034. }
  3035. } else {
  3036. /*
  3037. ** process initiator status..........................
  3038. */
  3039. status = srb->adapter_status;
  3040. if (status & H_OVER_UNDER_RUN) {
  3041. srb->target_status = 0;
  3042. SET_RES_DID(cmd->result, DID_OK);
  3043. SET_RES_MSG(cmd->result, srb->end_message);
  3044. } else if (srb->status & PARITY_ERROR) {
  3045. SET_RES_DID(cmd->result, DID_PARITY);
  3046. SET_RES_MSG(cmd->result, srb->end_message);
  3047. } else { /* No error */
  3048. srb->adapter_status = 0;
  3049. srb->target_status = 0;
  3050. SET_RES_DID(cmd->result, DID_OK);
  3051. }
  3052. }
  3053. if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
  3054. pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
  3055. scsi_sg_count(cmd), dir);
  3056. ckc_only = 0;
  3057. /* Check Error Conditions */
  3058. ckc_e:
  3059. if (cmd->cmnd[0] == INQUIRY) {
  3060. unsigned char *base = NULL;
  3061. struct ScsiInqData *ptr;
  3062. unsigned long flags = 0;
  3063. struct scatterlist* sg = scsi_sglist(cmd);
  3064. size_t offset = 0, len = sizeof(struct ScsiInqData);
  3065. local_irq_save(flags);
  3066. base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
  3067. ptr = (struct ScsiInqData *)(base + offset);
  3068. if (!ckc_only && (cmd->result & RES_DID) == 0
  3069. && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
  3070. && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
  3071. dcb->inquiry7 = ptr->Flags;
  3072. /*if( srb->cmd->cmnd[0] == INQUIRY && */
  3073. /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
  3074. if ((cmd->result == (DID_OK << 16)
  3075. || status_byte(cmd->result) &
  3076. CHECK_CONDITION)) {
  3077. if (!dcb->init_tcq_flag) {
  3078. add_dev(acb, dcb, ptr);
  3079. dcb->init_tcq_flag = 1;
  3080. }
  3081. }
  3082. scsi_kunmap_atomic_sg(base);
  3083. local_irq_restore(flags);
  3084. }
  3085. /* Here is the info for Doug Gilbert's sg3 ... */
  3086. scsi_set_resid(cmd, srb->total_xfer_length);
  3087. /* This may be interpreted by sb. or not ... */
  3088. cmd->SCp.this_residual = srb->total_xfer_length;
  3089. cmd->SCp.buffers_residual = 0;
  3090. if (debug_enabled(DBG_KG)) {
  3091. if (srb->total_xfer_length)
  3092. dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
  3093. "cmnd=0x%02x Missed %i bytes\n",
  3094. cmd, cmd->device->id, cmd->device->lun,
  3095. cmd->cmnd[0], srb->total_xfer_length);
  3096. }
  3097. srb_going_remove(dcb, srb);
  3098. /* Add to free list */
  3099. if (srb == acb->tmp_srb)
  3100. dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
  3101. else {
  3102. dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
  3103. cmd, cmd->result);
  3104. srb_free_insert(acb, srb);
  3105. }
  3106. pci_unmap_srb(acb, srb);
  3107. cmd->scsi_done(cmd);
  3108. waiting_process_next(acb);
  3109. }
  3110. /* abort all cmds in our queues */
  3111. static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
  3112. struct scsi_cmnd *cmd, u8 force)
  3113. {
  3114. struct DeviceCtlBlk *dcb;
  3115. dprintkl(KERN_INFO, "doing_srb_done: pids ");
  3116. list_for_each_entry(dcb, &acb->dcb_list, list) {
  3117. struct ScsiReqBlk *srb;
  3118. struct ScsiReqBlk *tmp;
  3119. struct scsi_cmnd *p;
  3120. list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
  3121. enum dma_data_direction dir;
  3122. int result;
  3123. p = srb->cmd;
  3124. dir = p->sc_data_direction;
  3125. result = MK_RES(0, did_flag, 0, 0);
  3126. printk("G:%p(%02i-%i) ", p,
  3127. p->device->id, p->device->lun);
  3128. srb_going_remove(dcb, srb);
  3129. free_tag(dcb, srb);
  3130. srb_free_insert(acb, srb);
  3131. p->result = result;
  3132. pci_unmap_srb_sense(acb, srb);
  3133. pci_unmap_srb(acb, srb);
  3134. if (force) {
  3135. /* For new EH, we normally don't need to give commands back,
  3136. * as they all complete or all time out */
  3137. p->scsi_done(p);
  3138. }
  3139. }
  3140. if (!list_empty(&dcb->srb_going_list))
  3141. dprintkl(KERN_DEBUG,
  3142. "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
  3143. dcb->target_id, dcb->target_lun);
  3144. if (dcb->tag_mask)
  3145. dprintkl(KERN_DEBUG,
  3146. "tag_mask for <%02i-%i> should be empty, is %08x!\n",
  3147. dcb->target_id, dcb->target_lun,
  3148. dcb->tag_mask);
  3149. /* Waiting queue */
  3150. list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
  3151. int result;
  3152. p = srb->cmd;
  3153. result = MK_RES(0, did_flag, 0, 0);
  3154. printk("W:%p<%02i-%i>", p, p->device->id,
  3155. p->device->lun);
  3156. srb_waiting_remove(dcb, srb);
  3157. srb_free_insert(acb, srb);
  3158. p->result = result;
  3159. pci_unmap_srb_sense(acb, srb);
  3160. pci_unmap_srb(acb, srb);
  3161. if (force) {
  3162. /* For new EH, we normally don't need to give commands back,
  3163. * as they all complete or all time out */
  3164. cmd->scsi_done(cmd);
  3165. }
  3166. }
  3167. if (!list_empty(&dcb->srb_waiting_list))
  3168. dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
  3169. list_size(&dcb->srb_waiting_list), dcb->target_id,
  3170. dcb->target_lun);
  3171. dcb->flag &= ~ABORT_DEV_;
  3172. }
  3173. printk("\n");
  3174. }
/*
 * reset_scsi_bus - assert a SCSI bus reset on the chip and busy-wait
 * until the controller reports the reset via its interrupt status.
 *
 * @acb: the adapter to reset.  Sets RESET_DEV in acb_flag so the
 * subsequent scsi_reset_detect() knows the reset was self-initiated.
 */
static void reset_scsi_bus(struct AdapterCtlBlk *acb)
{
	dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
	acb->acb_flag |= RESET_DEV;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

	/* spin until the chip latches INT_SCSIRESET */
	while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
		/* nothing */;
}
/*
 * set_basic_config - program the chip's basic operating state:
 * selection timeout, SCSI configuration registers, host ID,
 * asynchronous transfer mode, LED control, DMA FIFO configuration and
 * interrupt enables.
 *
 * @acb: the adapter to configure.
 */
static void set_basic_config(struct AdapterCtlBlk *acb)
{
	u8 bval;
	u16 wval;
	DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
	/* parity checking only if enabled in the adapter config */
	if (acb->config & HCC_PARITY)
		bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
	else
		bval = PHASELATCH | INITIATOR | BLOCKRST;
	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);

	/* program configuration 1: Act_Neg (+ Act_Neg_Enh? + Fast_Filter? + DataDis?) */
	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);	/* was 0x13: default */
	/* program Host ID */
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	/* set asynchronous transfer (sync offset 0) */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
	/* Turn LED control off */
	wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
	DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
	/* DMA config */
	wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
	wval |=
	    DMA_FIFO_HALF_HALF | DMA_ENHANCE /*| DMA_MEM_MULTI_READ */ ;
	DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
	/* Clear pending interrupt status */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	/* Enable SCSI interrupt */
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
	/*| EN_DMAXFERABORT | EN_DMAXFERCOMP | EN_FORCEDMACOMP */
	    );
}
/*
 * scsi_reset_detect - handle a SCSI bus reset reported by the chip:
 * reset the SCSI and DMA modules, re-establish the basic chip
 * configuration, then either mark a self-initiated reset as done or —
 * for an externally caused reset — fail all outstanding commands with
 * DID_RESET and restart command processing.
 *
 * @acb: the adapter that saw the reset.
 */
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
	dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
	/* delay half a second */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */
	udelay(500);
	/* Maybe we locked up the bus? Then lets wait even longer ... */
	acb->scsi_host->last_reset =
	    jiffies + 5 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);
	/*1.25 */
	/*DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT); */

	if (acb->acb_flag & RESET_DEV) {	/* RESET_DETECT, RESET_DONE, RESET_DEV */
		/* RESET_DEV was set by reset_scsi_bus(): this reset is ours */
		acb->acb_flag |= RESET_DONE;
	} else {
		/* reset came from elsewhere on the bus: fail everything */
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);
		/*DC395x_RecoverSRB( acb ); */
		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}
/*
 * request_sense - issue an automatic REQUEST SENSE for a command that
 * returned CHECK CONDITION.  The SRB's first S/G entry and current
 * transfer length are saved in the last S/G slot (to be restored by
 * pci_unmap_srb_sense()) and the SRB is redirected at the command's
 * sense buffer.
 *
 * @acb: the adapter.
 * @dcb: the device that reported CHECK CONDITION.
 * @srb: the request block to reuse for the sense fetch.
 */
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
		cmd, cmd->device->id, cmd->device->lun);

	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;

	/* KG: Can this prevent crap sense data ? */
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	/* Save some data: stash the first S/G entry and transferred count
	 * in the last S/G slot so pci_unmap_srb_sense() can restore them */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* srb->segment_x : a one entry of S/G list table */
	srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
	srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
	/* Map sense buffer for DMA from the device */
	srb->segment_x[0].address =
	    pci_map_single(acb->dev, cmd->sense_buffer,
			   SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
	       cmd->sense_buffer, srb->segment_x[0].address,
	       SCSI_SENSE_BUFFERSIZE);
	srb->sg_count = 1;
	srb->sg_index = 0;

	if (start_scsi(acb, dcb, srb)) {	/* Should only happen, if sb. else grabs the bus */
		dprintkl(KERN_DEBUG,
			"request_sense: (0x%p) failed <%02i-%i>\n",
			srb->cmd, dcb->target_id, dcb->target_lun);
		/* retry shortly via the waiting queue */
		srb_going_to_waiting_move(dcb, srb);
		waiting_set_timer(acb, HZ / 100);
	}
}
  3282. /**
 * device_alloc - Allocate a new device instance. This creates the
 * device instance and sets up all the data items. The adapter
 * instance is required to obtain configuration information for this
 * device. This does *not* add this device to the adapter's device
 * list.
  3288. *
  3289. * @acb: The adapter to obtain configuration information from.
  3290. * @target: The target for the new device.
  3291. * @lun: The lun for the new device.
  3292. *
  3293. * Return the new device if successful or NULL on failure.
  3294. **/
  3295. static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
  3296. u8 target, u8 lun)
  3297. {
  3298. struct NvRamType *eeprom = &acb->eeprom;
  3299. u8 period_index = eeprom->target[target].period & 0x07;
  3300. struct DeviceCtlBlk *dcb;
  3301. dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
  3302. dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
  3303. if (!dcb)
  3304. return NULL;
  3305. dcb->acb = NULL;
  3306. INIT_LIST_HEAD(&dcb->srb_going_list);
  3307. INIT_LIST_HEAD(&dcb->srb_waiting_list);
  3308. dcb->active_srb = NULL;
  3309. dcb->tag_mask = 0;
  3310. dcb->max_command = 1;
  3311. dcb->target_id = target;
  3312. dcb->target_lun = lun;
  3313. #ifndef DC395x_NO_DISCONNECT
  3314. dcb->identify_msg =
  3315. IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
  3316. #else
  3317. dcb->identify_msg = IDENTIFY(0, lun);
  3318. #endif
  3319. dcb->dev_mode = eeprom->target[target].cfg0;
  3320. dcb->inquiry7 = 0;
  3321. dcb->sync_mode = 0;
  3322. dcb->min_nego_period = clock_period[period_index];
  3323. dcb->sync_period = 0;
  3324. dcb->sync_offset = 0;
  3325. dcb->flag = 0;
  3326. #ifndef DC395x_NO_WIDE
  3327. if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
  3328. && (acb->config & HCC_WIDE_CARD))
  3329. dcb->sync_mode |= WIDE_NEGO_ENABLE;
  3330. #endif
  3331. #ifndef DC395x_NO_SYNC
  3332. if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
  3333. if (!(lun) || current_sync_offset)
  3334. dcb->sync_mode |= SYNC_NEGO_ENABLE;
  3335. #endif
  3336. if (dcb->target_lun != 0) {
  3337. /* Copy settings */
  3338. struct DeviceCtlBlk *p;
  3339. list_for_each_entry(p, &acb->dcb_list, list)
  3340. if (p->target_id == dcb->target_id)
  3341. break;
  3342. dprintkdbg(DBG_1,
  3343. "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
  3344. dcb->target_id, dcb->target_lun,
  3345. p->target_id, p->target_lun);
  3346. dcb->sync_mode = p->sync_mode;
  3347. dcb->sync_period = p->sync_period;
  3348. dcb->min_nego_period = p->min_nego_period;
  3349. dcb->sync_offset = p->sync_offset;
  3350. dcb->inquiry7 = p->inquiry7;
  3351. }
  3352. return dcb;
  3353. }
  3354. /**
  3355. * adapter_add_device - Adds the device instance to the adaptor instance.
  3356. *
  3357. * @acb: The adapter device to be updated
  3358. * @dcb: A newly created and initialised device instance to add.
  3359. **/
  3360. static void adapter_add_device(struct AdapterCtlBlk *acb,
  3361. struct DeviceCtlBlk *dcb)
  3362. {
  3363. /* backpointer to adapter */
  3364. dcb->acb = acb;
  3365. /* set run_robin to this device if it is currently empty */
  3366. if (list_empty(&acb->dcb_list))
  3367. acb->dcb_run_robin = dcb;
  3368. /* add device to list */
  3369. list_add_tail(&dcb->list, &acb->dcb_list);
  3370. /* update device maps */
  3371. acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
  3372. acb->children[dcb->target_id][dcb->target_lun] = dcb;
  3373. }
  3374. /**
  3375. * adapter_remove_device - Removes the device instance from the adaptor
 * instance. The device instance is not checked in any way or freed by this.
 * The caller is expected to take care of that. This will simply remove the
 * device from the adapter's data structures.
  3379. *
  3380. * @acb: The adapter device to be updated
  3381. * @dcb: A device that has previously been added to the adapter.
  3382. **/
  3383. static void adapter_remove_device(struct AdapterCtlBlk *acb,
  3384. struct DeviceCtlBlk *dcb)
  3385. {
  3386. struct DeviceCtlBlk *i;
  3387. struct DeviceCtlBlk *tmp;
  3388. dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
  3389. dcb->target_id, dcb->target_lun);
  3390. /* fix up any pointers to this device that we have in the adapter */
  3391. if (acb->active_dcb == dcb)
  3392. acb->active_dcb = NULL;
  3393. if (acb->dcb_run_robin == dcb)
  3394. acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
  3395. /* unlink from list */
  3396. list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
  3397. if (dcb == i) {
  3398. list_del(&i->list);
  3399. break;
  3400. }
  3401. /* clear map and children */
  3402. acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
  3403. acb->children[dcb->target_id][dcb->target_lun] = NULL;
  3404. dcb->acb = NULL;
  3405. }
  3406. /**
  3407. * adapter_remove_and_free_device - Removes a single device from the adapter
  3408. * and then frees the device information.
  3409. *
  3410. * @acb: The adapter device to be updated
  3411. * @dcb: A device that has previously been added to the adapter.
  3412. */
  3413. static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
  3414. struct DeviceCtlBlk *dcb)
  3415. {
  3416. if (list_size(&dcb->srb_going_list) > 1) {
  3417. dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
  3418. "Won't remove because of %i active requests.\n",
  3419. dcb->target_id, dcb->target_lun,
  3420. list_size(&dcb->srb_going_list));
  3421. return;
  3422. }
  3423. adapter_remove_device(acb, dcb);
  3424. kfree(dcb);
  3425. }
  3426. /**
  3427. * adapter_remove_and_free_all_devices - Removes and frees all of the
  3428. * devices associated with the specified adapter.
  3429. *
  3430. * @acb: The adapter from which all devices should be removed.
  3431. **/
  3432. static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
  3433. {
  3434. struct DeviceCtlBlk *dcb;
  3435. struct DeviceCtlBlk *tmp;
  3436. dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
  3437. list_size(&acb->dcb_list));
  3438. list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
  3439. adapter_remove_and_free_device(acb, dcb);
  3440. }
  3441. /**
  3442. * dc395x_slave_alloc - Called by the scsi mid layer to tell us about a new
  3443. * scsi device that we need to deal with. We allocate a new device and then
  3444. * insert that device into the adapters device list.
  3445. *
  3446. * @scsi_device: The new scsi device that we need to handle.
  3447. **/
  3448. static int dc395x_slave_alloc(struct scsi_device *scsi_device)
  3449. {
  3450. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3451. struct DeviceCtlBlk *dcb;
  3452. dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
  3453. if (!dcb)
  3454. return -ENOMEM;
  3455. adapter_add_device(acb, dcb);
  3456. return 0;
  3457. }
  3458. /**
  3459. * dc395x_slave_destroy - Called by the scsi mid layer to tell us about a
  3460. * device that is going away.
  3461. *
  3462. * @scsi_device: The new scsi device that we need to handle.
  3463. **/
  3464. static void dc395x_slave_destroy(struct scsi_device *scsi_device)
  3465. {
  3466. struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
  3467. struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
  3468. if (dcb)
  3469. adapter_remove_and_free_device(acb, dcb);
  3470. }
  3471. /**
  3472. * trms1040_wait_30us: wait for 30 us
  3473. *
  3474. * Waits for 30us (using the chip by the looks of it..)
  3475. *
  3476. * @io_port: base I/O address
  3477. **/
static void __devinit trms1040_wait_30us(unsigned long io_port)
{
	/* ScsiPortStallExecution(30); wait 30 us */
	/* arm the chip's general-purpose timer, then poll its timeout bit */
	outb(5, io_port + TRM_S1040_GEN_TIMER);
	while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
		/* nothing */ ;
}
  3485. /**
 * trms1040_write_cmd - write the specified command and address to
  3487. * chip
  3488. *
  3489. * @io_port: base I/O address
  3490. * @cmd: SB + op code (command) to send
  3491. * @addr: address to send
  3492. **/
  3493. static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
  3494. {
  3495. int i;
  3496. u8 send_data;
  3497. /* program SB + OP code */
  3498. for (i = 0; i < 3; i++, cmd <<= 1) {
  3499. send_data = NVR_SELECT;
  3500. if (cmd & 0x04) /* Start from bit 2 */
  3501. send_data |= NVR_BITOUT;
  3502. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3503. trms1040_wait_30us(io_port);
  3504. outb((send_data | NVR_CLOCK),
  3505. io_port + TRM_S1040_GEN_NVRAM);
  3506. trms1040_wait_30us(io_port);
  3507. }
  3508. /* send address */
  3509. for (i = 0; i < 7; i++, addr <<= 1) {
  3510. send_data = NVR_SELECT;
  3511. if (addr & 0x40) /* Start from bit 6 */
  3512. send_data |= NVR_BITOUT;
  3513. outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
  3514. trms1040_wait_30us(io_port);
  3515. outb((send_data | NVR_CLOCK),
  3516. io_port + TRM_S1040_GEN_NVRAM);
  3517. trms1040_wait_30us(io_port);
  3518. }
  3519. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3520. trms1040_wait_30us(io_port);
  3521. }
  3522. /**
  3523. * trms1040_set_data - store a single byte in the eeprom
  3524. *
 * Called from write all to write a single byte into the SEEPROM,
 * which is done one bit at a time.
  3527. *
  3528. * @io_port: base I/O address
  3529. * @addr: offset into EEPROM
  3530. * @byte: bytes to write
  3531. **/
static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
{
	int i;
	u8 send_data;

	/* Send write command & address */
	trms1040_write_cmd(io_port, 0x05, addr);

	/* Write data: 8 bits, MSB first, one bit per clock pulse */
	for (i = 0; i < 8; i++, byte <<= 1) {
		send_data = NVR_SELECT;
		if (byte & 0x80)	/* Start from bit 7 */
			send_data |= NVR_BITOUT;
		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}
	/* deassert the data line, chip still selected */
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* Wait for write ready: clock until the chip raises NVR_BITIN */
	while (1) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
			break;
	}

	/* Disable chip select */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
}
  3567. /**
  3568. * trms1040_write_all - write 128 bytes to the eeprom
  3569. *
  3570. * Write the supplied 128 bytes to the chips SEEPROM
  3571. *
  3572. * @eeprom: the data to write
  3573. * @io_port: the base io port
  3574. **/
  3575. static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
  3576. {
  3577. u8 *b_eeprom = (u8 *)eeprom;
  3578. u8 addr;
  3579. /* Enable SEEPROM */
  3580. outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
  3581. io_port + TRM_S1040_GEN_CONTROL);
  3582. /* write enable */
  3583. trms1040_write_cmd(io_port, 0x04, 0xFF);
  3584. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3585. trms1040_wait_30us(io_port);
  3586. /* write */
  3587. for (addr = 0; addr < 128; addr++, b_eeprom++)
  3588. trms1040_set_data(io_port, addr, *b_eeprom);
  3589. /* write disable */
  3590. trms1040_write_cmd(io_port, 0x04, 0x00);
  3591. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3592. trms1040_wait_30us(io_port);
  3593. /* Disable SEEPROM */
  3594. outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
  3595. io_port + TRM_S1040_GEN_CONTROL);
  3596. }
  3597. /**
  3598. * trms1040_get_data - get a single byte from the eeprom
  3599. *
 * Called from read all to read a single byte from the SEEPROM,
 * which is done one bit at a time.
  3602. *
  3603. * @io_port: base I/O address
  3604. * @addr: offset into SEEPROM
  3605. *
  3606. * Returns the byte read.
  3607. **/
  3608. static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
  3609. {
  3610. int i;
  3611. u8 read_byte;
  3612. u8 result = 0;
  3613. /* Send read command & address */
  3614. trms1040_write_cmd(io_port, 0x06, addr);
  3615. /* read data */
  3616. for (i = 0; i < 8; i++) {
  3617. outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
  3618. trms1040_wait_30us(io_port);
  3619. outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
  3620. /* Get data bit while falling edge */
  3621. read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
  3622. result <<= 1;
  3623. if (read_byte & NVR_BITIN)
  3624. result |= 1;
  3625. trms1040_wait_30us(io_port);
  3626. }
  3627. /* Disable chip select */
  3628. outb(0, io_port + TRM_S1040_GEN_NVRAM);
  3629. return result;
  3630. }
  3631. /**
  3632. * trms1040_read_all - read all bytes from the eeprom
  3633. *
  3634. * Read the 128 bytes from the SEEPROM.
  3635. *
  3636. * @eeprom: where to store the data
  3637. * @io_port: the base io port
  3638. **/
  3639. static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
  3640. {
  3641. u8 *b_eeprom = (u8 *)eeprom;
  3642. u8 addr;
  3643. /* Enable SEEPROM */
  3644. outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
  3645. io_port + TRM_S1040_GEN_CONTROL);
  3646. /* read details */
  3647. for (addr = 0; addr < 128; addr++, b_eeprom++)
  3648. *b_eeprom = trms1040_get_data(io_port, addr);
  3649. /* Disable SEEPROM */
  3650. outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
  3651. io_port + TRM_S1040_GEN_CONTROL);
  3652. }
/**
 * check_eeprom - get and check contents of the eeprom
 *
 * Read seeprom 128 bytes into the memory provided in eeprom.
 * Checks the checksum and if it's not correct it uses a set of default
 * values.
 *
 * @eeprom: caller allocated structure to read the eeprom data into
 * @io_port: io port to read from
 **/
static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
	u16 *w_eeprom = (u16 *)eeprom;	/* 16-bit view for checksumming */
	u16 w_addr;
	u16 cksum;
	u32 d_addr;
	u32 *d_eeprom;			/* 32-bit view for loading defaults */

	trms1040_read_all(eeprom, io_port);	/* read eeprom */

	/* Sum all 64 words of the image; a valid image sums to 0x1234
	 * (the last word is a stored compensation value, see below). */
	cksum = 0;
	for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
	     w_addr++, w_eeprom++)
		cksum += *w_eeprom;
	if (cksum != 0x1234) {
		/*
		 * Checksum is wrong.
		 * Load a set of defaults into the eeprom buffer
		 */
		dprintkl(KERN_WARNING,
			"EEProm checksum error: using default values and options.\n");
		eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->sub_sys_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->sub_class = 0x00;
		eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->device_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->reserved = 0x00;

		/* One 32-bit default per target entry (16 entries assumed
		 * — NOTE(review): confirm eeprom->target has 16 elements). */
		for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
		     d_addr < 16; d_addr++, d_eeprom++)
			*d_eeprom = 0x00000077;	/* cfg3,cfg2,period,cfg0 */

		/* d_eeprom now points just past the target array */
		*d_eeprom++ = 0x04000F07;	/* max_tag,delay_time,channel_cfg,scsi_id */
		*d_eeprom++ = 0x00000015;	/* reserved1,boot_lun,boot_target,reserved0 */
		/* zero the remaining 12 dwords of the 128-byte image */
		for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
			*d_eeprom = 0x00;

		/* Now load defaults (maybe set by boot/module params) */
		set_safe_settings();
		fix_settings();
		eeprom_override(eeprom);

		/* Recompute the checksum over the first 63 words and store
		 * the compensation word so the whole image sums to 0x1234;
		 * after the loop w_eeprom points at that final word. */
		eeprom->cksum = 0x00;
		for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
		     w_addr < 63; w_addr++, w_eeprom++)
			cksum += *w_eeprom;
		*w_eeprom = 0x1234 - cksum;
		trms1040_write_all(eeprom, io_port);

		eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
	} else {
		/* valid image: apply safe settings and command-line overrides */
		set_safe_settings();
		eeprom_index_to_delay(eeprom);
		eeprom_override(eeprom);
	}
}
/**
 * print_eeprom_settings - output the eeprom settings
 * to the kernel log so people can see what they were.
 *
 * @eeprom: The eeprom data structure to show details for.
 **/
static void __devinit print_eeprom_settings(struct NvRamType *eeprom)
{
	/* target[0].period indexes clock_speed[] (tenths of MHz) */
	dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
		eeprom->scsi_id,
		eeprom->target[0].period,
		clock_speed[eeprom->target[0].period] / 10,
		clock_speed[eeprom->target[0].period] % 10,
		eeprom->target[0].cfg0);
	/* max_tag is stored as a power-of-two exponent, hence 1 << max_tag */
	dprintkl(KERN_INFO, "               AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
		eeprom->channel_cfg, eeprom->max_tag,
		1 << eeprom->max_tag, eeprom->delay_time);
}
  3736. /* Free SG tables */
  3737. static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
  3738. {
  3739. int i;
  3740. const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
  3741. for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
  3742. kfree(acb->srb_array[i].segment_x);
  3743. }
/*
 * Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
 * should never cross a page boundary */
static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
{
	/* +1: one extra SG list is reserved for acb->srb (the temp SRB) */
	const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
	    *SEGMENTX_LEN;
	int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
	const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
	int srb_idx = 0;
	unsigned i = 0;
	struct SGentry *uninitialized_var(ptr);

	/* start from a clean slate so a partial failure can be freed safely */
	for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
		acb->srb_array[i].segment_x = NULL;

	dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
	while (pages--) {
		ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!ptr) {
			/* release whatever earlier pages were recorded */
			adapter_sg_tables_free(acb);
			return 1;
		}
		dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
			PAGE_SIZE, ptr, srb_idx);
		i = 0;
		/* hand out SG lists from this page until it is full or
		 * all SRBs are served */
		while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
			acb->srb_array[srb_idx++].segment_x =
			    ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
	}
	/* NOTE(review): relies on `i` and `ptr` keeping their values from the
	 * final loop iteration — the leftover slot on the last page (if any)
	 * is given to the temporary SRB acb->srb. Assumes pages >= 1, which
	 * holds since mem_needed > 0. */
	if (i < srbs_per_page)
		acb->srb.segment_x =
		    ptr + (i * DC395x_MAX_SG_LISTENTRY);
	else
		dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
	return 0;
}
/**
 * adapter_print_config - print adapter connection and termination
 * config
 *
 * The io port in the adapter needs to have been set before calling
 * this function.
 *
 * @acb: The adapter to print the information for.
 **/
static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
{
	u8 bval;

	/* Connector bits are active-low in GEN_STATUS: a cleared bit
	 * means the connector is in use. */
	bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
	dprintkl(KERN_INFO, "%sConnectors: ",
		((bval & WIDESCSI) ? "(Wide) " : ""));
	if (!(bval & CON5068))
		printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
	if (!(bval & CON68))
		printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
	if (!(bval & CON50))
		printk("int50 ");
	/* all three bits clear would mean all three connectors occupied
	 * at once, which should not be possible */
	if ((bval & (CON5068 | CON50 | CON68)) ==
	    0 /*(CON5068 | CON50 | CON68) */ )
		printk(" Oops! (All 3?) ");
	bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
	printk(" Termination: ");
	if (bval & DIS_TERM)
		printk("Disabled\n");
	else {
		if (bval & AUTOTERM)
			printk("Auto ");
		if (bval & LOW8TERM)
			printk("Low ");
		if (bval & UP8TERM)
			printk("High ");
		printk("\n");
	}
}
/**
 * adapter_init_params - Initialize the various parameters in the
 * adapter structure. Note that the pointer to the scsi_host is set
 * early (when this instance is created) and the io_port and irq
 * values are set later after they have been reserved. This just gets
 * everything set to a good starting position.
 *
 * The eeprom structure in the adapter needs to have been set before
 * calling this function.
 *
 * @acb: The adapter to initialize.
 **/
static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;
	int i;

	/* NOTE: acb->scsi_host is set at scsi_host/acb creation time */
	/* NOTE: acb->io_port_base is set at port registration time */
	/* NOTE: acb->io_port_len is set at port registration time */

	INIT_LIST_HEAD(&acb->dcb_list);
	acb->dcb_run_robin = NULL;
	acb->active_dcb = NULL;

	INIT_LIST_HEAD(&acb->srb_free_list);
	/* temp SRB for Q tag used or abort command used */
	acb->tmp_srb = &acb->srb;
	init_timer(&acb->waiting_timer);
	init_timer(&acb->selto_timer);

	acb->srb_count = DC395x_MAX_SRB_CNT;

	acb->sel_timeout = DC395x_SEL_TIMEOUT;	/* timeout=250ms */
	/* NOTE: acb->irq_level is set at IRQ registration time */

	/* eeprom stores the tag count as a power-of-two exponent;
	 * cap the expanded value at 30 */
	acb->tag_max_num = 1 << eeprom->max_tag;
	if (acb->tag_max_num > 30)
		acb->tag_max_num = 30;

	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	acb->gmode2 = eeprom->channel_cfg;
	acb->config = 0;	/* NOTE: actually set in adapter_init_chip */

	if (eeprom->channel_cfg & NAC_SCANLUN)
		acb->lun_chk = 1;
	acb->scan_devices = 1;

	acb->scsi_host->this_id = eeprom->scsi_id;
	acb->hostid_bit = (1 << acb->scsi_host->this_id);

	for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
		acb->dcb_map[i] = 0;

	acb->msg_len = 0;

	/* link static array of srbs into the srb free list */
	/* NOTE(review): the `- 1` bound leaves srb_array[srb_count - 1]
	 * out of the free list; acb->srb (the temp SRB) is a separate
	 * member, so this looks like a deliberate reservation or an
	 * off-by-one — confirm before changing. */
	for (i = 0; i < acb->srb_count - 1; i++)
		srb_free_insert(acb, &acb->srb_array[i]);
}
/**
 * adapter_init_host - Initialize the scsi host instance based on
 * values that we have already stored in the adapter instance. There's
 * some mention that a lot of these are deprecated, so we won't use
 * them (we'll use the ones in the adapter instance) but we'll fill
 * them in in case something else needs them.
 *
 * The eeprom structure, irq and io ports in the adapter need to have
 * been set before calling this function.
 *
 * @host: The scsi host instance to fill in the values for.
 **/
static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
{
        struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	struct NvRamType *eeprom = &acb->eeprom;

	host->max_cmd_len = 24;
	host->can_queue = DC395x_MAX_CMD_QUEUE;
	host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
	host->this_id = (int)eeprom->scsi_id;
	host->io_port = acb->io_port_base;
	host->n_io_port = acb->io_port_len;
	host->dma_channel = -1;		/* no ISA DMA involved */
	host->unique_id = acb->io_port_base;
	host->irq = acb->irq_level;
	host->last_reset = jiffies;

	/* don't scan the adapter's own id; if it happens to be the
	 * top id, just lower max_id instead */
	host->max_id = 16;
	if (host->max_id - 1 == eeprom->scsi_id)
		host->max_id--;

#ifdef CONFIG_SCSI_MULTI_LUN
	if (eeprom->channel_cfg & NAC_SCANLUN)
		host->max_lun = 8;
	else
		host->max_lun = 1;
#else
	host->max_lun = 1;
#endif
}
/**
 * adapter_init_chip - Get the chip into a known state and figure out
 * some of the settings that apply to this adapter.
 *
 * The io port in the adapter needs to have been set before calling
 * this function. The config will be configured correctly on return.
 *
 * @acb: The adapter which we are to init.
 **/
static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
{
        struct NvRamType *eeprom = &acb->eeprom;

        /* Mask all the interrupt */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);

	/* Reset SCSI module */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);

	/* Reset PCI/DMA module */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	udelay(20);	/* give the modules time to come out of reset */

	/* program configuration 0 */
	acb->config = HCC_AUTOTERM | HCC_PARITY;
	if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
		acb->config |= HCC_WIDE_CARD;

	if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
		acb->config |= HCC_SCSI_RESET;

	if (acb->config & HCC_SCSI_RESET) {
		dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
		DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

		/*while (!( DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET )); */
		/*spin_unlock_irq (&io_request_lock); */
		udelay(500);

		/* block new commands until the bus has settled:
		 * half a second plus the eeprom-configured delay */
		acb->scsi_host->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;

		/*spin_lock_irq (&io_request_lock); */
	}
}
/**
 * init_adapter - Grab the resource for the card, setup the adapter
 * information, set the card into a known state, create the various
 * tables etc etc. This basically gets all adapter information all up
 * to date, initialised and gets the chip in sync with it.
 *
 * @acb: This hosts adapter structure
 * @io_port: The base I/O port
 * @io_port_len: The I/O port region length
 * @irq: IRQ
 *
 * Returns 0 if the initialization succeeds, any other value on
 * failure.
 **/
static int __devinit adapter_init(struct AdapterCtlBlk *acb,
	unsigned long io_port, u32 io_port_len, unsigned int irq)
{
	if (!request_region(io_port, io_port_len, DC395X_NAME)) {
		dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
		goto failed;
	}
	/* store port base to indicate we have registered it */
	acb->io_port_base = io_port;
	acb->io_port_len = io_port_len;

	if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
		/* release the region we just claimed */
		dprintkl(KERN_INFO, "Failed to register IRQ\n");
		goto failed;
	}
	/* store irq to indicate we have registered it */
	acb->irq_level = irq;

	/* get eeprom configuration information and command line settings etc */
	check_eeprom(&acb->eeprom, io_port);
	print_eeprom_settings(&acb->eeprom);

	/* setup adapter control block */
	adapter_init_params(acb);

	/* display card connectors/termination settings */
	adapter_print_config(acb);

	if (adapter_sg_tables_alloc(acb)) {
		dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
		goto failed;
	}
	adapter_init_scsi_host(acb->scsi_host);
	adapter_init_chip(acb);
	set_basic_config(acb);

	dprintkdbg(DBG_0,
		"adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
		"size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
		acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
		sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
	return 0;

failed:
	/* unified cleanup: irq_level/io_port_base double as "resource
	 * acquired" flags (acb came zeroed from scsi_host_alloc), and
	 * adapter_sg_tables_free() tolerates NULL segment_x pointers */
	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);
	adapter_sg_tables_free(acb);

	return 1;
}
/**
 * adapter_uninit_chip - cleanly shut down the scsi controller chip,
 * stopping all operations and disabling interrupt generation on the
 * card.
 *
 * @acb: The adapter which we are to shutdown.
 **/
static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
{
	/* disable interrupts first so nothing fires mid-shutdown */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);

	/* reset the scsi bus, but only if we also reset it at startup */
	if (acb->config & HCC_SCSI_RESET)
		reset_scsi_bus(acb);

	/* clear any pending interrupt state (reading the register acks it) */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
}
/**
 * adapter_uninit - Shut down the chip and release any resources that
 * we had allocated. Once this returns the adapter should not be used
 * anymore.
 *
 * @acb: The adapter which we are to un-initialize.
 **/
static void adapter_uninit(struct AdapterCtlBlk *acb)
{
	unsigned long flags;
	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* remove timers */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	adapter_uninit_chip(acb);
	adapter_remove_and_free_all_devices(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	/* irq/region are released outside the lock; the fields double as
	 * "was registered" flags (see adapter_init) */
	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);

	adapter_sg_tables_free(acb);
}
  4042. #undef SPRINTF
  4043. #define SPRINTF(args...) pos += sprintf(pos, args)
  4044. #undef YESNO
  4045. #define YESNO(YN) \
  4046. if (YN) SPRINTF(" Yes ");\
  4047. else SPRINTF(" No ")
/* /proc/scsi read handler: format adapter, device and queue state into
 * the caller-supplied buffer. Write access (inout != 0) is rejected. */
static int dc395x_proc_info(struct Scsi_Host *host, char *buffer,
		char **start, off_t offset, int length, int inout)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	char *pos = buffer;	/* SPRINTF() appends at pos */
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	if (inout)		/* Has data been written to the file ? */
		return -EPERM;

	SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
	SPRINTF(" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	SPRINTF("SCSI Host Nr %i, ", host->host_no);
	SPRINTF("DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base);
	SPRINTF("irq_level 0x%04x, ", acb->irq_level);
	/* sel_timeout ticks are ~1.638ms each, hence the scaling */
	SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	SPRINTF("MaxID %i, MaxLUN %i, ", host->max_id, host->max_lun);
	SPRINTF("AdapterID %i\n", host->this_id);

	SPRINTF("tag_max_num %i", acb->tag_max_num);
	/*SPRINTF(", DMA_Status %i\n", DC395x_read8(acb, TRM_S1040_DMA_STATUS)); */
	SPRINTF(", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time);
	/*SPRINTF("\n"); */

	SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	SPRINTF
	    ("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
	     acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
	     acb->dcb_map[6], acb->dcb_map[7]);
	SPRINTF
	    ("                      %02x %02x %02x %02x %02x %02x %02x %02x\n",
	     acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
	     acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
	     acb->dcb_map[14], acb->dcb_map[15]);

	/* per-device table: one line per DCB */
	SPRINTF
	    ("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		SPRINTF("%02i %02i  %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		/* clock_period[] is in 4ns units, hence the << 2 to get ns */
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			SPRINTF("  %03i ns ", nego_period);
		else
			SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			/* derive MHz with one decimal digit (rounded) from
			 * the period in ns */
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			SPRINTF("   %2i.%1i M     %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			SPRINTF("                 ");

		/* Add more info ... */
		SPRINTF("     %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		SPRINTF("Waiting queue timer running\n");
	else
		SPRINTF("\n");

	/* dump the waiting and going command queues per device */
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			SPRINTF("DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
                list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			SPRINTF(" %p", srb->cmd);
		if (!list_empty(&dcb->srb_going_list))
			SPRINTF("\nDCB (%02i-%i): Going  : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			SPRINTF(" %p", srb->cmd);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			SPRINTF("\n");
	}

	if (debug_enabled(DBG_1)) {
		SPRINTF("DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			SPRINTF("%p -> ", dcb);
		}
		SPRINTF("END\n");
	}

	*start = buffer + offset;
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	/* standard proc_info windowing: report how much of [offset,
	 * offset+length) we actually produced */
	if (pos - buffer < offset)
		return 0;
	else if (pos - buffer - offset < length)
		return pos - buffer - offset;
	else
		return length;
}
/* SCSI mid-layer host template: entry points and queueing limits for
 * every adapter instance this driver registers. */
static struct scsi_host_template dc395x_driver_template = {
	.module                 = THIS_MODULE,
	.proc_name              = DC395X_NAME,
	.proc_info              = dc395x_proc_info,
	.name                   = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand           = dc395x_queue_command,
	.bios_param             = dc395x_bios_param,
	.slave_alloc            = dc395x_slave_alloc,
	.slave_destroy          = dc395x_slave_destroy,
	.can_queue              = DC395x_MAX_CAN_QUEUE,
	.this_id                = 7,
	.sg_tablesize           = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun            = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler       = dc395x_eh_abort,
	.eh_bus_reset_handler   = dc395x_eh_bus_reset,
	.use_clustering         = DISABLE_CLUSTERING,
};
  4171. /**
  4172. * banner_display - Display banner on first instance of driver
  4173. * initialized.
  4174. **/
  4175. static void banner_display(void)
  4176. {
  4177. static int banner_done = 0;
  4178. if (!banner_done)
  4179. {
  4180. dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
  4181. banner_done = 1;
  4182. }
  4183. }
/**
 * dc395x_init_one - Initialise a single instance of the adapter.
 *
 * The PCI layer will call this once for each instance of the adapter
 * that it finds in the system. The pci_dev structure indicates which
 * instance we are being called from.
 *
 * @dev: The PCI device to initialize.
 * @id: Looks like a pointer to the entry in our pci device table
 * that was actually matched by the PCI subsystem.
 *
 * Returns 0 on success, or an error code (-ve) on failure.
 **/
static int __devinit dc395x_init_one(struct pci_dev *dev,
		const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;
	
	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	if (pci_enable_device(dev))
	{
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* allocate scsi host information (includes our adapter;
	 * hostdata is zeroed by scsi_host_alloc) */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
 	acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
 	acb->scsi_host = scsi_host;
 	acb->dev = dev;

	/* initialise the adapter and everything we need */
 	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		goto fail;
	}

	pci_set_master(dev);

	/* get the scsi mid level to scan for new devices on the bus */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);
	return 0;

fail:
	/* adapter_uninit/scsi_host_put safely handle the partially
	 * initialised states reached above */
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}
/**
 * dc395x_remove_one - Called to remove a single instance of the
 * adapter.
 *
 * @dev: The PCI device to remove.
 **/
static void __devexit dc395x_remove_one(struct pci_dev *dev)
{
	struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);

	dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);

	/* detach from the mid layer before tearing down the hardware */
	scsi_remove_host(scsi_host);
	adapter_uninit(acb);
	pci_disable_device(dev);
	scsi_host_put(scsi_host);
	pci_set_drvdata(dev, NULL);
}
/* PCI ids this driver binds to: any Tekram TRM-S1040 based board,
 * regardless of subsystem ids */
static struct pci_device_id dc395x_pci_table[] = {
	{
		.vendor		= PCI_VENDOR_ID_TEKRAM,
		.device		= PCI_DEVICE_ID_TEKRAM_TRMS1040,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	 },
	{}			/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
/* Hooks this driver into the PCI core: probe/remove per matching device */
static struct pci_driver dc395x_driver = {
	.name           = DC395X_NAME,
	.id_table       = dc395x_pci_table,
	.probe          = dc395x_init_one,
	.remove         = __devexit_p(dc395x_remove_one),
};
/**
 * dc395x_module_init - Module initialization function
 *
 * Used by both module and built-in driver to initialise this driver.
 * Returns the pci_register_driver() result (0 on success).
 **/
static int __init dc395x_module_init(void)
{
	return pci_register_driver(&dc395x_driver);
}
/**
 * dc395x_module_exit - Module cleanup function.
 *
 * Unregistering the driver triggers dc395x_remove_one() for every
 * bound device.
 **/
static void __exit dc395x_module_exit(void)
{
	pci_unregister_driver(&dc395x_driver);
}

module_init(dc395x_module_init);
module_exit(dc395x_module_exit);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");