  1. /*
  2. * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family
  3. * of PCI-SCSI IO processors.
  4. *
  5. * Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>
  6. * Copyright (c) 2003-2005 Matthew Wilcox <matthew@wil.cx>
  7. *
  8. * This driver is derived from the Linux sym53c8xx driver.
  9. * Copyright (C) 1998-2000 Gerard Roudier
  10. *
  11. * The sym53c8xx driver is derived from the ncr53c8xx driver that had been
  12. * a port of the FreeBSD ncr driver to Linux-1.2.13.
  13. *
  14. * The original ncr driver has been written for 386bsd and FreeBSD by
  15. * Wolfgang Stanglmeier <wolf@cologne.de>
  16. * Stefan Esser <se@mi.Uni-Koeln.de>
  17. * Copyright (C) 1994 Wolfgang Stanglmeier
  18. *
  19. * Other major contributions:
  20. *
  21. * NVRAM detection and reading.
  22. * Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk>
  23. *
  24. *-----------------------------------------------------------------------------
  25. *
  26. * This program is free software; you can redistribute it and/or modify
  27. * it under the terms of the GNU General Public License as published by
  28. * the Free Software Foundation; either version 2 of the License, or
  29. * (at your option) any later version.
  30. *
  31. * This program is distributed in the hope that it will be useful,
  32. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  33. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  34. * GNU General Public License for more details.
  35. *
  36. * You should have received a copy of the GNU General Public License
  37. * along with this program; if not, write to the Free Software
  38. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  39. */
  40. #include <linux/slab.h>
  41. #include <asm/param.h> /* for timeouts in units of HZ */
  42. #include "sym_glue.h"
  43. #include "sym_nvram.h"
  44. #if 0
  45. #define SYM_DEBUG_GENERIC_SUPPORT
  46. #endif
  47. /*
  48. * Needed function prototypes.
  49. */
  50. static void sym_int_ma (struct sym_hcb *np);
  51. static void sym_int_sir(struct sym_hcb *);
  52. static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np);
  53. static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa);
  54. static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln);
  55. static void sym_complete_error (struct sym_hcb *np, struct sym_ccb *cp);
  56. static void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp);
  57. static int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp);
  58. /*
  59. * Print a buffer in hexadecimal format with a ".\n" at end.
  60. */
  61. static void sym_printl_hex(u_char *p, int n)
  62. {
  63. while (n-- > 0)
  64. printf (" %x", *p++);
  65. printf (".\n");
  66. }
  67. static void sym_print_msg(struct sym_ccb *cp, char *label, u_char *msg)
  68. {
  69. sym_print_addr(cp->cmd, "%s: ", label);
  70. spi_print_msg(msg);
  71. printf("\n");
  72. }
  73. static void sym_print_nego_msg(struct sym_hcb *np, int target, char *label, u_char *msg)
  74. {
  75. struct sym_tcb *tp = &np->target[target];
  76. dev_info(&tp->starget->dev, "%s: ", label);
  77. spi_print_msg(msg);
  78. printf("\n");
  79. }
  80. /*
  81. * Print something that tells about extended errors.
  82. */
  83. void sym_print_xerr(struct scsi_cmnd *cmd, int x_status)
  84. {
  85. if (x_status & XE_PARITY_ERR) {
  86. sym_print_addr(cmd, "unrecovered SCSI parity error.\n");
  87. }
  88. if (x_status & XE_EXTRA_DATA) {
  89. sym_print_addr(cmd, "extraneous data discarded.\n");
  90. }
  91. if (x_status & XE_BAD_PHASE) {
  92. sym_print_addr(cmd, "illegal scsi phase (4/5).\n");
  93. }
  94. if (x_status & XE_SODL_UNRUN) {
  95. sym_print_addr(cmd, "ODD transfer in DATA OUT phase.\n");
  96. }
  97. if (x_status & XE_SWIDE_OVRUN) {
  98. sym_print_addr(cmd, "ODD transfer in DATA IN phase.\n");
  99. }
  100. }
  101. /*
  102. * Return a string for SCSI BUS mode.
  103. */
  104. static char *sym_scsi_bus_mode(int mode)
  105. {
  106. switch(mode) {
  107. case SMODE_HVD: return "HVD";
  108. case SMODE_SE: return "SE";
  109. case SMODE_LVD: return "LVD";
  110. }
  111. return "??";
  112. }
  113. /*
  114. * Soft reset the chip.
  115. *
  116. * Raising SRST when the chip is running may cause
  117. * problems on dual function chips (see below).
  118. * On the other hand, LVD devices need some delay
  119. * to settle and report actual BUS mode in STEST4.
  120. */
  121. static void sym_chip_reset (struct sym_hcb *np)
  122. {
  123. OUTB(np, nc_istat, SRST);
  124. INB(np, nc_mbox1);
  125. udelay(10);
  126. OUTB(np, nc_istat, 0);
  127. INB(np, nc_mbox1);
  128. udelay(2000); /* For BUS MODE to settle */
  129. }
  130. /*
  131. * Really soft reset the chip. :)
  132. *
  133. * Some 896 and 876 chip revisions may hang up if we set
  134. * the SRST (soft reset) bit at the wrong time when SCRIPTS
  135. * are running.
  136. * So, we need to abort the current operation prior to
  137. * soft resetting the chip.
  138. */
  139. static void sym_soft_reset (struct sym_hcb *np)
  140. {
  141. u_char istat = 0;
  142. int i;
  143. if (!(np->features & FE_ISTAT1) || !(INB(np, nc_istat1) & SCRUN))
  144. goto do_chip_reset;
  145. OUTB(np, nc_istat, CABRT);
  146. for (i = 100000 ; i ; --i) {
  147. istat = INB(np, nc_istat);
  148. if (istat & SIP) {
  149. INW(np, nc_sist);
  150. }
  151. else if (istat & DIP) {
  152. if (INB(np, nc_dstat) & ABRT)
  153. break;
  154. }
  155. udelay(5);
  156. }
  157. OUTB(np, nc_istat, 0);
  158. if (!i)
  159. printf("%s: unable to abort current chip operation, "
  160. "ISTAT=0x%02x.\n", sym_name(np), istat);
  161. do_chip_reset:
  162. sym_chip_reset(np);
  163. }
  164. /*
  165. * Start reset process.
  166. *
  167. * The interrupt handler will reinitialize the chip.
  168. */
  169. static void sym_start_reset(struct sym_hcb *np)
  170. {
  171. sym_reset_scsi_bus(np, 1);
  172. }
  173. int sym_reset_scsi_bus(struct sym_hcb *np, int enab_int)
  174. {
  175. u32 term;
  176. int retv = 0;
  177. sym_soft_reset(np); /* Soft reset the chip */
  178. if (enab_int)
  179. OUTW(np, nc_sien, RST);
  180. /*
  181. * Enable Tolerant, reset IRQD if present and
  182. * properly set IRQ mode, prior to resetting the bus.
  183. */
  184. OUTB(np, nc_stest3, TE);
  185. OUTB(np, nc_dcntl, (np->rv_dcntl & IRQM));
  186. OUTB(np, nc_scntl1, CRST);
  187. INB(np, nc_mbox1);
  188. udelay(200);
  189. if (!SYM_SETUP_SCSI_BUS_CHECK)
  190. goto out;
  191. /*
  192. * Check for no terminators or SCSI bus shorts to ground.
  193. * Read SCSI data bus, data parity bits and control signals.
  194. * We are expecting RESET to be TRUE and other signals to be
  195. * FALSE.
  196. */
  197. term = INB(np, nc_sstat0);
  198. term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */
  199. term |= ((INB(np, nc_sstat2) & 0x01) << 26) | /* sdp1 */
  200. ((INW(np, nc_sbdl) & 0xff) << 9) | /* d7-0 */
  201. ((INW(np, nc_sbdl) & 0xff00) << 10) | /* d15-8 */
  202. INB(np, nc_sbcl); /* req ack bsy sel atn msg cd io */
  203. if (!np->maxwide)
  204. term &= 0x3ffff;
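/*
 * For reference, the shifts above pack: SBCL in bits 7:0, SCSI RST
 * (SSTAT0 bit 1) in bit 8, D7-D0 in bits 16:9, SDP0 in bit 17,
 * D15-D8 in bits 25:18 and SDP1 in bit 26.  The expected value
 * 2<<7 = 0x100 therefore means "only RST asserted"; the 0x3ffff
 * mask just drops the wide-bus bits a narrow chip does not have.
 */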
  205. if (term != (2<<7)) {
  206. printf("%s: suspicious SCSI data while resetting the BUS.\n",
  207. sym_name(np));
  208. printf("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = "
  209. "0x%lx, expecting 0x%lx\n",
  210. sym_name(np),
  211. (np->features & FE_WIDE) ? "dp1,d15-8," : "",
  212. (u_long)term, (u_long)(2<<7));
  213. if (SYM_SETUP_SCSI_BUS_CHECK == 1)
  214. retv = 1;
  215. }
  216. out:
  217. OUTB(np, nc_scntl1, 0);
  218. return retv;
  219. }
  220. /*
  221. * Select SCSI clock frequency
  222. */
  223. static void sym_selectclock(struct sym_hcb *np, u_char scntl3)
  224. {
  225. /*
  226. * If multiplier not present or not selected, leave here.
  227. */
  228. if (np->multiplier <= 1) {
  229. OUTB(np, nc_scntl3, scntl3);
  230. return;
  231. }
  232. if (sym_verbose >= 2)
  233. printf ("%s: enabling clock multiplier\n", sym_name(np));
  234. OUTB(np, nc_stest1, DBLEN); /* Enable clock multiplier */
  235. /*
  236. * Wait for the LCKFRQ bit to be set if supported by the chip.
  237. * Otherwise wait 50 micro-seconds (at least).
  238. */
  239. if (np->features & FE_LCKFRQ) {
  240. int i = 20;
  241. while (!(INB(np, nc_stest4) & LCKFRQ) && --i > 0)
  242. udelay(20);
  243. if (!i)
  244. printf("%s: the chip cannot lock the frequency\n",
  245. sym_name(np));
  246. } else {
  247. INB(np, nc_mbox1);
  248. udelay(50+10);
  249. }
  250. OUTB(np, nc_stest3, HSC); /* Halt the scsi clock */
  251. OUTB(np, nc_scntl3, scntl3);
  252. OUTB(np, nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier */
  253. OUTB(np, nc_stest3, 0x00); /* Restart scsi clock */
  254. }
  255. /*
  256. * Determine the chip's clock frequency.
  257. *
  258. * This is essential for the negotiation of the synchronous
  259. * transfer rate.
  260. *
  261. * Note: we have to return the correct value.
  262. * THERE IS NO SAFE DEFAULT VALUE.
  263. *
  264. * Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
  265. * 53C860 and 53C875 rev. 1 support fast20 transfers but
  266. * do not have a clock doubler and so are provided with an
  267. * 80 MHz clock. All other fast20 boards incorporate a doubler
  268. * and so should be delivered with a 40 MHz clock.
  269. * The recent fast40 chips (895/896/895A/1010) use a 40 MHz base
  270. * clock and provide a clock quadrupler (160 MHz).
  271. */
  272. /*
  273. * calculate SCSI clock frequency (in KHz)
  274. */
  275. static unsigned getfreq (struct sym_hcb *np, int gen)
  276. {
  277. unsigned int ms = 0;
  278. unsigned int f;
  279. /*
  280. * Measure GEN timer delay in order
  281. * to calculate SCSI clock frequency
  282. *
  283. * This code will never execute too
  284. * many loop iterations (if DELAY is
  285. * reasonably correct). It could get
  286. * too low a delay (too high a freq.)
  287. * if the CPU is slow executing the
  288. * loop for some reason (an NMI, for
  289. * example). For this reason, when multiple
  290. * measurements are to be performed, we
  291. * trust the higher delay (i.e. the lower
  292. * returned frequency).
  293. */
  294. OUTW(np, nc_sien, 0); /* mask all scsi interrupts */
  295. INW(np, nc_sist); /* clear pending scsi interrupt */
  296. OUTB(np, nc_dien, 0); /* mask all dma interrupts */
  297. INW(np, nc_sist); /* another one, just to be sure :) */
  298. /*
  299. * The C1010-33 core does not report GEN in SIST
  300. * if this interrupt is masked in SIEN.
  301. * I don't know yet if the C1010-66 behaves the same way.
  302. */
  303. if (np->features & FE_C10) {
  304. OUTW(np, nc_sien, GEN);
  305. OUTB(np, nc_istat1, SIRQD);
  306. }
  307. OUTB(np, nc_scntl3, 4); /* set pre-scaler to divide by 3 */
  308. OUTB(np, nc_stime1, 0); /* disable general purpose timer */
  309. OUTB(np, nc_stime1, gen); /* set to nominal delay of 1<<gen * 125us */
  310. while (!(INW(np, nc_sist) & GEN) && ms++ < 100000)
  311. udelay(1000/4); /* count in 1/4 of ms */
  312. OUTB(np, nc_stime1, 0); /* disable general purpose timer */
  313. /*
  314. * Undo C1010-33 specific settings.
  315. */
  316. if (np->features & FE_C10) {
  317. OUTW(np, nc_sien, 0);
  318. OUTB(np, nc_istat1, 0);
  319. }
  320. /*
  321. * set prescaler to divide by whatever 0 means
  322. * 0 ought to choose divide by 2, but appears
  323. * to set divide by 3.5 mode in my 53c810 ...
  324. */
  325. OUTB(np, nc_scntl3, 0);
  326. /*
  327. * adjust for prescaler, and convert into KHz
  328. */
  329. f = ms ? ((1 << gen) * (4340*4)) / ms : 0;
  330. /*
  331. * The C1010-33 result is biased by a factor
  332. * of 2/3 compared to earlier chips.
  333. */
  334. if (np->features & FE_C10)
  335. f = (f * 2) / 3;
  336. if (sym_verbose >= 2)
  337. printf ("%s: Delay (GEN=%d): %u msec, %u KHz\n",
  338. sym_name(np), gen, ms/4, f);
  339. return f;
  340. }
  341. static unsigned sym_getfreq (struct sym_hcb *np)
  342. {
  343. u_int f1, f2;
  344. int gen = 8;
  345. getfreq (np, gen); /* throw away first result */
  346. f1 = getfreq (np, gen);
  347. f2 = getfreq (np, gen);
  348. if (f1 > f2) f1 = f2; /* trust lower result */
  349. return f1;
  350. }
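/*
 * For illustration, inverting the formula in getfreq() above: with
 * gen = 8 a 40000 KHz SCSI clock makes the GEN timer expire after
 * roughly 256*17360/40000 ~ 111 quarter-millisecond polls (about
 * 28 ms), and getfreq() then returns 256*17360/111 ~ 40037 KHz,
 * which sym_getclock() below snaps to the nearest nominal value.
 */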
  351. /*
  352. * Get/probe chip SCSI clock frequency
  353. */
  354. static void sym_getclock (struct sym_hcb *np, int mult)
  355. {
  356. unsigned char scntl3 = np->sv_scntl3;
  357. unsigned char stest1 = np->sv_stest1;
  358. unsigned f1;
  359. np->multiplier = 1;
  360. f1 = 40000;
  361. /*
  362. * True with 875/895/896/895A with clock multiplier selected
  363. */
  364. if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
  365. if (sym_verbose >= 2)
  366. printf ("%s: clock multiplier found\n", sym_name(np));
  367. np->multiplier = mult;
  368. }
  369. /*
  370. * If multiplier not found or scntl3 not 7,5,3,
  371. * reset chip and get frequency from general purpose timer.
  372. * Otherwise trust scntl3 BIOS setting.
  373. */
  374. if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) {
  375. OUTB(np, nc_stest1, 0); /* make sure doubler is OFF */
  376. f1 = sym_getfreq (np);
  377. if (sym_verbose)
  378. printf ("%s: chip clock is %uKHz\n", sym_name(np), f1);
  379. if (f1 < 45000) f1 = 40000;
  380. else if (f1 < 55000) f1 = 50000;
  381. else f1 = 80000;
  382. if (f1 < 80000 && mult > 1) {
  383. if (sym_verbose >= 2)
  384. printf ("%s: clock multiplier assumed\n",
  385. sym_name(np));
  386. np->multiplier = mult;
  387. }
  388. } else {
  389. if ((scntl3 & 7) == 3) f1 = 40000;
  390. else if ((scntl3 & 7) == 5) f1 = 80000;
  391. else f1 = 160000;
  392. f1 /= np->multiplier;
  393. }
  394. /*
  395. * Compute controller synchronous parameters.
  396. */
  397. f1 *= np->multiplier;
  398. np->clock_khz = f1;
  399. }
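/*
 * For illustration: on an 875 whose BIOS engaged the doubler
 * (STEST1 DBLEN|DBLSEL set) and left SCNTL3 with divisor code 5,
 * the multiplier is trusted (np->multiplier = 2), code 5 maps to
 * 80000 KHz, the division and multiplication by the multiplier
 * cancel out, and np->clock_khz ends up as 80000 with no
 * measurement performed.
 */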
  400. /*
  401. * Get/probe PCI clock frequency
  402. */
  403. static int sym_getpciclock (struct sym_hcb *np)
  404. {
  405. int f = 0;
  406. /*
  407. * For now, we only need to know about the actual
  408. * PCI BUS clock frequency for C1010-66 chips.
  409. */
  410. #if 1
  411. if (np->features & FE_66MHZ) {
  412. #else
  413. if (1) {
  414. #endif
  415. OUTB(np, nc_stest1, SCLK); /* Use the PCI clock as SCSI clock */
  416. f = sym_getfreq(np);
  417. OUTB(np, nc_stest1, 0);
  418. }
  419. np->pciclk_khz = f;
  420. return f;
  421. }
  422. /*
  423. * SYMBIOS chip clock divisor table.
  424. *
  425. * Divisors are multiplied by 10,000,000 in order to
  426. * simplify the calculations.
  427. */
  428. #define _5M 5000000
  429. static const u32 div_10M[] = {2*_5M, 3*_5M, 4*_5M, 6*_5M, 8*_5M, 12*_5M, 16*_5M};
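/*
 * The table above thus holds the supported SCSI clock divisors
 * 1, 1.5, 2, 3, 4, 6 and 8, each scaled by 10,000,000.
 */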
  430. /*
  431. * Get clock factor and sync divisor for a given
  432. * synchronous factor period.
  433. */
  434. static int
  435. sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fakp)
  436. {
  437. u32 clk = np->clock_khz; /* SCSI clock frequency in kHz */
  438. int div = np->clock_divn; /* Number of divisors supported */
  439. u32 fak; /* Sync factor in sxfer */
  440. u32 per; /* Period in tenths of ns */
  441. u32 kpc; /* (per * clk) */
  442. int ret;
  443. /*
  444. * Compute the synchronous period in tenths of nano-seconds
  445. */
  446. if (dt && sfac <= 9) per = 125;
  447. else if (sfac <= 10) per = 250;
  448. else if (sfac == 11) per = 303;
  449. else if (sfac == 12) per = 500;
  450. else per = 40 * sfac;
  451. ret = per;
  452. kpc = per * clk;
  453. if (dt)
  454. kpc <<= 1;
  455. /*
  456. * For the earliest C10 revision (0), we cannot use extra
  457. * clocks for the setting of the SCSI clocking.
  458. * Note that this limits the lowest sync data transfer
  459. * to 5 Mega-transfers per second and may result in
  460. * using higher clock divisors.
  461. */
  462. #if 1
  463. if ((np->features & (FE_C10|FE_U3EN)) == FE_C10) {
  464. /*
  465. * Look for the lowest clock divisor that allows an
  466. * output speed not faster than the period.
  467. */
  468. while (div > 0) {
  469. --div;
  470. if (kpc > (div_10M[div] << 2)) {
  471. ++div;
  472. break;
  473. }
  474. }
  475. fak = 0; /* No extra clocks */
  476. if (div == np->clock_divn) { /* Are we too fast ? */
  477. ret = -1;
  478. }
  479. *divp = div;
  480. *fakp = fak;
  481. return ret;
  482. }
  483. #endif
  484. /*
  485. * Look for the greatest clock divisor that allows an
  486. * input speed faster than the period.
  487. */
  488. while (div-- > 0)
  489. if (kpc >= (div_10M[div] << 2)) break;
  490. /*
  491. * Calculate the lowest clock factor that allows an output
  492. * speed not faster than the period, and the max output speed.
  493. * If fak >= 1 we will set both XCLKH_ST and XCLKH_DT.
  494. * If fak >= 2 we will also set XCLKS_ST and XCLKS_DT.
  495. */
  496. if (dt) {
  497. fak = (kpc - 1) / (div_10M[div] << 1) + 1 - 2;
  498. /* ret = ((2+fak)*div_10M[div])/np->clock_khz; */
  499. } else {
  500. fak = (kpc - 1) / div_10M[div] + 1 - 4;
  501. /* ret = ((4+fak)*div_10M[div])/np->clock_khz; */
  502. }
  503. /*
  504. * Check against our hardware limits, or bugs :).
  505. */
  506. if (fak > 2) {
  507. fak = 2;
  508. ret = -1;
  509. }
  510. /*
  511. * Compute and return sync parameters.
  512. */
  513. *divp = div;
  514. *fakp = fak;
  515. return ret;
  516. }
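/*
 * Worked example (ST case, values chosen for illustration): asking
 * for period factor 12 (50 ns, Fast-20) on a chip clocked at
 * 80000 KHz gives per = 500 and kpc = 40,000,000.  The divisor
 * loop stops at div = 0 (div_10M[0] << 2 == 40,000,000), and
 * fak = (kpc - 1)/div_10M[0] + 1 - 4 = 0, i.e. the core clock is
 * divided by 1 and no extra clock phases are added: 80 MHz with
 * 4 clocks per period gives the requested 20 mega-transfers/s.
 */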
  517. /*
  518. * SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64 and
  519. * 128 transfers. All chips support bursts of at least 16
  520. * transfers. The 825A, 875 and 895 chips support bursts of up
  521. * to 128 transfers and the 895A and 896 support bursts of up
  522. * to 64 transfers. All other chips support bursts of up to 16
  523. * transfers.
  524. *
  525. * For PCI 32 bit data transfers each transfer is a DWORD.
  526. * It is a QUADWORD (8 bytes) for PCI 64 bit data transfers.
  527. *
  528. * We use log base 2 (burst length) as internal code, with
  529. * value 0 meaning "burst disabled".
  530. */
  531. /*
  532. * Burst length from burst code.
  533. */
  534. #define burst_length(bc) (!(bc))? 0 : 1 << (bc)
  535. /*
  536. * Burst code from io register bits.
  537. */
  538. #define burst_code(dmode, ctest4, ctest5) \
  539. (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1
  540. /*
  541. * Set initial io register bits from burst code.
  542. */
  543. static inline void sym_init_burst(struct sym_hcb *np, u_char bc)
  544. {
  545. np->rv_ctest4 &= ~0x80;
  546. np->rv_dmode &= ~(0x3 << 6);
  547. np->rv_ctest5 &= ~0x4;
  548. if (!bc) {
  549. np->rv_ctest4 |= 0x80;
  550. }
  551. else {
  552. --bc;
  553. np->rv_dmode |= ((bc & 0x3) << 6);
  554. np->rv_ctest5 |= (bc & 0x4);
  555. }
  556. }
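/*
 * For illustration, the encoding round-trips: a burst code of 4
 * means bursts of 1<<4 = 16 transfers; sym_init_burst() stores
 * code-1 = 3 in DMODE bits 7:6 and (code-1) & 4 in CTEST5 bit 2,
 * and burst_code() recovers 3 + 0 + 1 = 4 from those bits.  A
 * hypothetical self-check (not part of the driver, never compiled)
 * could look like this:
 */
#if 0
static void sym_burst_selftest(struct sym_hcb *np)
{
	u_char bc, code;

	for (bc = 0; bc <= 7; bc++) {
		sym_init_burst(np, bc);
		code = burst_code(np->rv_dmode, np->rv_ctest4,
				  np->rv_ctest5);
		BUG_ON(code != bc);	/* encode/decode must agree */
	}
}
#endif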
  557. /*
  558. * Save initial settings of some IO registers.
  559. * Assumed to have been set by the BIOS.
  560. * We cannot reset the chip prior to reading the
  561. * IO registers, since information would be lost.
  562. * Since the SCRIPTS processor may be running, this
  563. * is not safe on paper, but it seems to work quite
  564. * well. :)
  565. */
  566. static void sym_save_initial_setting (struct sym_hcb *np)
  567. {
  568. np->sv_scntl0 = INB(np, nc_scntl0) & 0x0a;
  569. np->sv_scntl3 = INB(np, nc_scntl3) & 0x07;
  570. np->sv_dmode = INB(np, nc_dmode) & 0xce;
  571. np->sv_dcntl = INB(np, nc_dcntl) & 0xa8;
  572. np->sv_ctest3 = INB(np, nc_ctest3) & 0x01;
  573. np->sv_ctest4 = INB(np, nc_ctest4) & 0x80;
  574. np->sv_gpcntl = INB(np, nc_gpcntl);
  575. np->sv_stest1 = INB(np, nc_stest1);
  576. np->sv_stest2 = INB(np, nc_stest2) & 0x20;
  577. np->sv_stest4 = INB(np, nc_stest4);
  578. if (np->features & FE_C10) { /* Always large DMA fifo + ultra3 */
  579. np->sv_scntl4 = INB(np, nc_scntl4);
  580. np->sv_ctest5 = INB(np, nc_ctest5) & 0x04;
  581. }
  582. else
  583. np->sv_ctest5 = INB(np, nc_ctest5) & 0x24;
  584. }
  585. /*
  586. * Set SCSI BUS mode.
  587. * - LVD capable chips (895/895A/896/1010) report the current BUS mode
  588. * through the STEST4 IO register.
  589. * - For previous generation chips (825/825A/875), the user has to tell us
  590. * how to check against HVD, since a 100% safe algorithm is not possible.
  591. */
  592. static void sym_set_bus_mode(struct sym_hcb *np, struct sym_nvram *nvram)
  593. {
  594. if (np->scsi_mode)
  595. return;
  596. np->scsi_mode = SMODE_SE;
  597. if (np->features & (FE_ULTRA2|FE_ULTRA3))
  598. np->scsi_mode = (np->sv_stest4 & SMODE);
  599. else if (np->features & FE_DIFF) {
  600. if (SYM_SETUP_SCSI_DIFF == 1) {
  601. if (np->sv_scntl3) {
  602. if (np->sv_stest2 & 0x20)
  603. np->scsi_mode = SMODE_HVD;
  604. } else if (nvram->type == SYM_SYMBIOS_NVRAM) {
  605. if (!(INB(np, nc_gpreg) & 0x08))
  606. np->scsi_mode = SMODE_HVD;
  607. }
  608. } else if (SYM_SETUP_SCSI_DIFF == 2)
  609. np->scsi_mode = SMODE_HVD;
  610. }
  611. if (np->scsi_mode == SMODE_HVD)
  612. np->rv_stest2 |= 0x20;
  613. }
  614. /*
  615. * Prepare io register values used by sym_start_up()
  616. * according to selected and supported features.
  617. */
  618. static int sym_prepare_setting(struct Scsi_Host *shost, struct sym_hcb *np, struct sym_nvram *nvram)
  619. {
  620. struct sym_data *sym_data = shost_priv(shost);
  621. struct pci_dev *pdev = sym_data->pdev;
  622. u_char burst_max;
  623. u32 period;
  624. int i;
  625. np->maxwide = (np->features & FE_WIDE) ? 1 : 0;
  626. /*
  627. * Guess the frequency of the chip's clock.
  628. */
  629. if (np->features & (FE_ULTRA3 | FE_ULTRA2))
  630. np->clock_khz = 160000;
  631. else if (np->features & FE_ULTRA)
  632. np->clock_khz = 80000;
  633. else
  634. np->clock_khz = 40000;
  635. /*
  636. * Get the clock multiplier factor.
  637. */
  638. if (np->features & FE_QUAD)
  639. np->multiplier = 4;
  640. else if (np->features & FE_DBLR)
  641. np->multiplier = 2;
  642. else
  643. np->multiplier = 1;
  644. /*
  645. * Measure the SCSI clock frequency for chips
  646. * on which it may vary from the assumed one.
  647. */
  648. if (np->features & FE_VARCLK)
  649. sym_getclock(np, np->multiplier);
  650. /*
  651. * Divisor to be used for async (timer pre-scaler).
  652. */
  653. i = np->clock_divn - 1;
  654. while (--i >= 0) {
  655. if (10ul * SYM_CONF_MIN_ASYNC * np->clock_khz > div_10M[i]) {
  656. ++i;
  657. break;
  658. }
  659. }
  660. np->rv_scntl3 = i+1;
  661. /*
  662. * The C1010 uses hardwired divisors for async.
  663. * So, we just throw away the async divisor. :-)
  664. */
  665. if (np->features & FE_C10)
  666. np->rv_scntl3 = 0;
  667. /*
  668. * Minimum synchronous period factor supported by the chip.
  669. * Btw, 'period' is in tenths of nanoseconds.
  670. */
  671. period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz;
  672. if (period <= 250) np->minsync = 10;
  673. else if (period <= 303) np->minsync = 11;
  674. else if (period <= 500) np->minsync = 12;
  675. else np->minsync = (period + 40 - 1) / 40;
  676. /*
  677. * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2).
  678. */
  679. if (np->minsync < 25 &&
  680. !(np->features & (FE_ULTRA|FE_ULTRA2|FE_ULTRA3)))
  681. np->minsync = 25;
  682. else if (np->minsync < 12 &&
  683. !(np->features & (FE_ULTRA2|FE_ULTRA3)))
  684. np->minsync = 12;
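/*
 * For illustration: a plain 40000 KHz chip gives
 * period = (4*10,000,000 + 39,999)/40,000 = 1000 (100 ns), hence
 * minsync = 25 (Fast-10); a 160000 KHz Ultra2/Ultra3 part gives
 * period = 250 and minsync = 10 (25 ns, Fast-40 in ST mode).
 */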
  685. /*
  686. * Maximum synchronous period factor supported by the chip.
  687. */
  688. period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz);
  689. np->maxsync = period > 2540 ? 254 : period / 10;
  690. /*
  691. * If chip is a C1010, guess the sync limits in DT mode.
  692. */
  693. if ((np->features & (FE_C10|FE_ULTRA3)) == (FE_C10|FE_ULTRA3)) {
  694. if (np->clock_khz == 160000) {
  695. np->minsync_dt = 9;
  696. np->maxsync_dt = 50;
  697. np->maxoffs_dt = nvram->type ? 62 : 31;
  698. }
  699. }
  700. /*
  701. * 64 bit addressing (895A/896/1010) ?
  702. */
  703. if (np->features & FE_DAC) {
  704. if (!use_dac(np))
  705. np->rv_ccntl1 |= (DDAC);
  706. else if (SYM_CONF_DMA_ADDRESSING_MODE == 1)
  707. np->rv_ccntl1 |= (XTIMOD | EXTIBMV);
  708. else if (SYM_CONF_DMA_ADDRESSING_MODE == 2)
  709. np->rv_ccntl1 |= (0 | EXTIBMV);
  710. }
  711. /*
  712. * Phase mismatch handled by SCRIPTS (895A/896/1010) ?
  713. */
  714. if (np->features & FE_NOPM)
  715. np->rv_ccntl0 |= (ENPMJ);
  716. /*
  717. * C1010-33 Errata: Part Number 609-039638 (rev. 1) is fixed.
  718. * In dual channel mode, contention occurs if internal cycles
  719. * are used. Disable internal cycles.
  720. */
  721. if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
  722. pdev->revision < 0x1)
  723. np->rv_ccntl0 |= DILS;
  724. /*
  725. * Select burst length (dwords)
  726. */
  727. burst_max = SYM_SETUP_BURST_ORDER;
  728. if (burst_max == 255)
  729. burst_max = burst_code(np->sv_dmode, np->sv_ctest4,
  730. np->sv_ctest5);
  731. if (burst_max > 7)
  732. burst_max = 7;
  733. if (burst_max > np->maxburst)
  734. burst_max = np->maxburst;
  735. /*
  736. * DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2.
  737. * This chip and the 860 Rev 1 may wrongly use PCI cache line
  738. * based transactions on LOAD/STORE instructions. So we have
  739. * to prevent these chips from using such PCI transactions in
  740. * this driver. The generic ncr driver that does not use
  741. * LOAD/STORE instructions does not need this work-around.
  742. */
  743. if ((pdev->device == PCI_DEVICE_ID_NCR_53C810 &&
  744. pdev->revision >= 0x10 && pdev->revision <= 0x11) ||
  745. (pdev->device == PCI_DEVICE_ID_NCR_53C860 &&
  746. pdev->revision <= 0x1))
  747. np->features &= ~(FE_WRIE|FE_ERL|FE_ERMP);
  748. /*
  749. * Select all supported special features.
  750. * If we are using on-board RAM for scripts, prefetch (PFEN)
  751. * does not help, but burst op fetch (BOF) does.
  752. * Disabling PFEN makes sure BOF will be used.
  753. */
  754. if (np->features & FE_ERL)
  755. np->rv_dmode |= ERL; /* Enable Read Line */
  756. if (np->features & FE_BOF)
  757. np->rv_dmode |= BOF; /* Burst Opcode Fetch */
  758. if (np->features & FE_ERMP)
  759. np->rv_dmode |= ERMP; /* Enable Read Multiple */
  760. #if 1
  761. if ((np->features & FE_PFEN) && !np->ram_ba)
  762. #else
  763. if (np->features & FE_PFEN)
  764. #endif
  765. np->rv_dcntl |= PFEN; /* Prefetch Enable */
  766. if (np->features & FE_CLSE)
  767. np->rv_dcntl |= CLSE; /* Cache Line Size Enable */
  768. if (np->features & FE_WRIE)
  769. np->rv_ctest3 |= WRIE; /* Write and Invalidate */
  770. if (np->features & FE_DFS)
  771. np->rv_ctest5 |= DFS; /* Dma Fifo Size */
  772. /*
  773. * Select some other features.
  774. */
  775. np->rv_ctest4 |= MPEE; /* Master parity checking */
  776. np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */
  777. /*
  778. * Get parity checking, host ID and verbose mode from NVRAM
  779. */
  780. np->myaddr = 255;
  781. np->scsi_mode = 0;
  782. sym_nvram_setup_host(shost, np, nvram);
  783. /*
  784. * Get SCSI addr of host adapter (set by bios?).
  785. */
  786. if (np->myaddr == 255) {
  787. np->myaddr = INB(np, nc_scid) & 0x07;
  788. if (!np->myaddr)
  789. np->myaddr = SYM_SETUP_HOST_ID;
  790. }
  791. /*
  792. * Prepare initial io register bits for burst length
  793. */
  794. sym_init_burst(np, burst_max);
  795. sym_set_bus_mode(np, nvram);
  796. /*
  797. * Set LED support from SCRIPTS.
  798. * Ignore this feature for boards known to use a
  799. * specific GPIO wiring and for the 895A, 896
  800. * and 1010 that drive the LED directly.
  801. */
  802. if ((SYM_SETUP_SCSI_LED ||
  803. (nvram->type == SYM_SYMBIOS_NVRAM ||
  804. (nvram->type == SYM_TEKRAM_NVRAM &&
  805. pdev->device == PCI_DEVICE_ID_NCR_53C895))) &&
  806. !(np->features & FE_LEDC) && !(np->sv_gpcntl & 0x01))
  807. np->features |= FE_LED0;
  808. /*
  809. * Set irq mode.
  810. */
  811. switch(SYM_SETUP_IRQ_MODE & 3) {
  812. case 2:
  813. np->rv_dcntl |= IRQM;
  814. break;
  815. case 1:
  816. np->rv_dcntl |= (np->sv_dcntl & IRQM);
  817. break;
  818. default:
  819. break;
  820. }
  821. /*
  822. * Configure targets according to driver setup.
  823. * If NVRAM present get targets setup from NVRAM.
  824. */
  825. for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
  826. struct sym_tcb *tp = &np->target[i];
  827. tp->usrflags |= (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);
  828. tp->usrtags = SYM_SETUP_MAX_TAG;
  829. tp->usr_width = np->maxwide;
  830. tp->usr_period = 9;
  831. sym_nvram_setup_target(tp, i, nvram);
  832. if (!tp->usrtags)
  833. tp->usrflags &= ~SYM_TAGS_ENABLED;
  834. }
  835. /*
  836. * Let user know about the settings.
  837. */
  838. printf("%s: %s, ID %d, Fast-%d, %s, %s\n", sym_name(np),
  839. sym_nvram_type(nvram), np->myaddr,
  840. (np->features & FE_ULTRA3) ? 80 :
  841. (np->features & FE_ULTRA2) ? 40 :
  842. (np->features & FE_ULTRA) ? 20 : 10,
  843. sym_scsi_bus_mode(np->scsi_mode),
  844. (np->rv_scntl0 & 0xa) ? "parity checking" : "NO parity");
  845. /*
  846. * Tell him more on demand.
  847. */
  848. if (sym_verbose) {
  849. printf("%s: %s IRQ line driver%s\n",
  850. sym_name(np),
  851. np->rv_dcntl & IRQM ? "totem pole" : "open drain",
  852. np->ram_ba ? ", using on-chip SRAM" : "");
  853. printf("%s: using %s firmware.\n", sym_name(np), np->fw_name);
  854. if (np->features & FE_NOPM)
  855. printf("%s: handling phase mismatch from SCRIPTS.\n",
  856. sym_name(np));
  857. }
  858. /*
  859. * And still more.
  860. */
  861. if (sym_verbose >= 2) {
  862. printf ("%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
  863. "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
  864. sym_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl,
  865. np->sv_ctest3, np->sv_ctest4, np->sv_ctest5);
  866. printf ("%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = "
  867. "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n",
  868. sym_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl,
  869. np->rv_ctest3, np->rv_ctest4, np->rv_ctest5);
  870. }
  871. return 0;
  872. }
  873. /*
  874. * Test the pci bus snoop logic :-(
  875. *
  876. * Has to be called with interrupts disabled.
  877. */
  878. #ifdef CONFIG_SCSI_SYM53C8XX_MMIO
  879. static int sym_regtest(struct sym_hcb *np)
  880. {
  881. register volatile u32 data;
  882. /*
  883. * Chip registers may NOT be cached.
  884. * Write 0xffffffff to a read-only register area,
  885. * and try to read it back.
  886. */
  887. data = 0xffffffff;
  888. OUTL(np, nc_dstat, data);
  889. data = INL(np, nc_dstat);
  890. #if 1
  891. if (data == 0xffffffff) {
  892. #else
  893. if ((data & 0xe2f0fffd) != 0x02000080) {
  894. #endif
  895. printf ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n",
  896. (unsigned) data);
  897. return 0x10;
  898. }
  899. return 0;
  900. }
  901. #else
  902. static inline int sym_regtest(struct sym_hcb *np)
  903. {
  904. return 0;
  905. }
  906. #endif
  907. static int sym_snooptest(struct sym_hcb *np)
  908. {
  909. u32 sym_rd, sym_wr, sym_bk, host_rd, host_wr, pc, dstat;
  910. int i, err;
  911. err = sym_regtest(np);
  912. if (err)
  913. return err;
  914. restart_test:
  915. /*
  916. * Enable Master Parity Checking as we intend
  917. * to enable it for normal operations.
  918. */
  919. OUTB(np, nc_ctest4, (np->rv_ctest4 & MPEE));
  920. /*
  921. * init
  922. */
  923. pc = SCRIPTZ_BA(np, snooptest);
  924. host_wr = 1;
  925. sym_wr = 2;
  926. /*
  927. * Set memory and register.
  928. */
  929. np->scratch = cpu_to_scr(host_wr);
  930. OUTL(np, nc_temp, sym_wr);
  931. /*
  932. * Start script (exchange values)
  933. */
  934. OUTL(np, nc_dsa, np->hcb_ba);
  935. OUTL_DSP(np, pc);
  936. /*
  937. * Wait 'til done (with timeout)
  938. */
  939. for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
  940. if (INB(np, nc_istat) & (INTF|SIP|DIP))
  941. break;
  942. if (i>=SYM_SNOOP_TIMEOUT) {
  943. printf ("CACHE TEST FAILED: timeout.\n");
  944. return (0x20);
  945. }
  946. /*
  947. * Check for fatal DMA errors.
  948. */
  949. dstat = INB(np, nc_dstat);
  950. #if 1 /* Band-aid for broken hardware that fails PCI parity */
  951. if ((dstat & MDPE) && (np->rv_ctest4 & MPEE)) {
  952. printf ("%s: PCI DATA PARITY ERROR DETECTED - "
  953. "DISABLING MASTER DATA PARITY CHECKING.\n",
  954. sym_name(np));
  955. np->rv_ctest4 &= ~MPEE;
  956. goto restart_test;
  957. }
  958. #endif
  959. if (dstat & (MDPE|BF|IID)) {
  960. printf ("CACHE TEST FAILED: DMA error (dstat=0x%02x).", dstat);
  961. return (0x80);
  962. }
  963. /*
  964. * Save termination position.
  965. */
  966. pc = INL(np, nc_dsp);
  967. /*
  968. * Read memory and register.
  969. */
  970. host_rd = scr_to_cpu(np->scratch);
  971. sym_rd = INL(np, nc_scratcha);
  972. sym_bk = INL(np, nc_temp);
  973. /*
  974. * Check termination position.
  975. */
  976. if (pc != SCRIPTZ_BA(np, snoopend)+8) {
  977. printf ("CACHE TEST FAILED: script execution failed.\n");
  978. printf ("start=%08lx, pc=%08lx, end=%08lx\n",
  979. (u_long) SCRIPTZ_BA(np, snooptest), (u_long) pc,
  980. (u_long) SCRIPTZ_BA(np, snoopend) +8);
  981. return (0x40);
  982. }
  983. /*
  984. * Show results.
  985. */
  986. if (host_wr != sym_rd) {
  987. printf ("CACHE TEST FAILED: host wrote %d, chip read %d.\n",
  988. (int) host_wr, (int) sym_rd);
  989. err |= 1;
  990. }
  991. if (host_rd != sym_wr) {
  992. printf ("CACHE TEST FAILED: chip wrote %d, host read %d.\n",
  993. (int) sym_wr, (int) host_rd);
  994. err |= 2;
  995. }
  996. if (sym_bk != sym_wr) {
  997. printf ("CACHE TEST FAILED: chip wrote %d, read back %d.\n",
  998. (int) sym_wr, (int) sym_bk);
  999. err |= 4;
  1000. }
  1001. return err;
  1002. }
  1003. /*
  1004. * log message for real hard errors
  1005. *
  1006. * sym0 targ 0?: ERROR (ds:si) (so-si-sd) (sx/s3/s4) @ name (dsp:dbc).
  1007. * reg: r0 r1 r2 r3 r4 r5 r6 ..... rf.
  1008. *
  1009. * exception register:
  1010. * ds: dstat
  1011. * si: sist
  1012. *
  1013. * SCSI bus lines:
  1014. * so: control lines as driven by chip.
  1015. * si: control lines as seen by chip.
  1016. * sd: scsi data lines as seen by chip.
  1017. *
  1018. * wide/fastmode:
  1019. * sx: sxfer (see the manual)
  1020. * s3: scntl3 (see the manual)
  1021. * s4: scntl4 (see the manual)
  1022. *
  1023. * current script command:
  1024. * dsp: script address (relative to start of script).
  1025. * dbc: first word of script command.
  1026. *
  1027. * First 24 registers of the chip:
  1028. * r0..rf
  1029. */
  1030. static void sym_log_hard_error(struct Scsi_Host *shost, u_short sist, u_char dstat)
  1031. {
  1032. struct sym_hcb *np = sym_get_hcb(shost);
  1033. u32 dsp;
  1034. int script_ofs;
  1035. int script_size;
  1036. char *script_name;
  1037. u_char *script_base;
  1038. int i;
  1039. dsp = INL(np, nc_dsp);
  1040. if (dsp > np->scripta_ba &&
  1041. dsp <= np->scripta_ba + np->scripta_sz) {
  1042. script_ofs = dsp - np->scripta_ba;
  1043. script_size = np->scripta_sz;
  1044. script_base = (u_char *) np->scripta0;
  1045. script_name = "scripta";
  1046. }
  1047. else if (np->scriptb_ba < dsp &&
  1048. dsp <= np->scriptb_ba + np->scriptb_sz) {
  1049. script_ofs = dsp - np->scriptb_ba;
  1050. script_size = np->scriptb_sz;
  1051. script_base = (u_char *) np->scriptb0;
  1052. script_name = "scriptb";
  1053. } else {
  1054. script_ofs = dsp;
  1055. script_size = 0;
  1056. script_base = NULL;
  1057. script_name = "mem";
  1058. }
  1059. printf ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x/%x) @ (%s %x:%08x).\n",
  1060. sym_name(np), (unsigned)INB(np, nc_sdid)&0x0f, dstat, sist,
  1061. (unsigned)INB(np, nc_socl), (unsigned)INB(np, nc_sbcl),
  1062. (unsigned)INB(np, nc_sbdl), (unsigned)INB(np, nc_sxfer),
  1063. (unsigned)INB(np, nc_scntl3),
  1064. (np->features & FE_C10) ? (unsigned)INB(np, nc_scntl4) : 0,
  1065. script_name, script_ofs, (unsigned)INL(np, nc_dbc));
  1066. if (((script_ofs & 3) == 0) &&
  1067. (unsigned)script_ofs < script_size) {
  1068. printf ("%s: script cmd = %08x\n", sym_name(np),
  1069. scr_to_cpu((int) *(u32 *)(script_base + script_ofs)));
  1070. }
  1071. printf("%s: regdump:", sym_name(np));
  1072. for (i = 0; i < 24; i++)
  1073. printf(" %02x", (unsigned)INB_OFF(np, i));
  1074. printf(".\n");
  1075. /*
  1076. * PCI BUS error.
  1077. */
  1078. if (dstat & (MDPE|BF))
  1079. sym_log_bus_error(shost);
  1080. }
  1081. void sym_dump_registers(struct Scsi_Host *shost)
  1082. {
  1083. struct sym_hcb *np = sym_get_hcb(shost);
  1084. u_short sist;
  1085. u_char dstat;
  1086. sist = INW(np, nc_sist);
  1087. dstat = INB(np, nc_dstat);
  1088. sym_log_hard_error(shost, sist, dstat);
  1089. }
  1090. static struct sym_chip sym_dev_table[] = {
  1091. {PCI_DEVICE_ID_NCR_53C810, 0x0f, "810", 4, 8, 4, 64,
  1092. FE_ERL}
  1093. ,
  1094. #ifdef SYM_DEBUG_GENERIC_SUPPORT
  1095. {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
  1096. FE_BOF}
  1097. ,
  1098. #else
  1099. {PCI_DEVICE_ID_NCR_53C810, 0xff, "810a", 4, 8, 4, 1,
  1100. FE_CACHE_SET|FE_LDSTR|FE_PFEN|FE_BOF}
  1101. ,
  1102. #endif
  1103. {PCI_DEVICE_ID_NCR_53C815, 0xff, "815", 4, 8, 4, 64,
  1104. FE_BOF|FE_ERL}
  1105. ,
  1106. {PCI_DEVICE_ID_NCR_53C825, 0x0f, "825", 6, 8, 4, 64,
  1107. FE_WIDE|FE_BOF|FE_ERL|FE_DIFF}
  1108. ,
  1109. {PCI_DEVICE_ID_NCR_53C825, 0xff, "825a", 6, 8, 4, 2,
  1110. FE_WIDE|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|FE_RAM|FE_DIFF}
  1111. ,
  1112. {PCI_DEVICE_ID_NCR_53C860, 0xff, "860", 4, 8, 5, 1,
  1113. FE_ULTRA|FE_CACHE_SET|FE_BOF|FE_LDSTR|FE_PFEN}
  1114. ,
  1115. {PCI_DEVICE_ID_NCR_53C875, 0x01, "875", 6, 16, 5, 2,
  1116. FE_WIDE|FE_ULTRA|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1117. FE_RAM|FE_DIFF|FE_VARCLK}
  1118. ,
  1119. {PCI_DEVICE_ID_NCR_53C875, 0xff, "875", 6, 16, 5, 2,
  1120. FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1121. FE_RAM|FE_DIFF|FE_VARCLK}
  1122. ,
  1123. {PCI_DEVICE_ID_NCR_53C875J, 0xff, "875J", 6, 16, 5, 2,
  1124. FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1125. FE_RAM|FE_DIFF|FE_VARCLK}
  1126. ,
  1127. {PCI_DEVICE_ID_NCR_53C885, 0xff, "885", 6, 16, 5, 2,
  1128. FE_WIDE|FE_ULTRA|FE_DBLR|FE_CACHE0_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1129. FE_RAM|FE_DIFF|FE_VARCLK}
  1130. ,
  1131. #ifdef SYM_DEBUG_GENERIC_SUPPORT
  1132. {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
  1133. FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|
  1134. FE_RAM|FE_LCKFRQ}
  1135. ,
  1136. #else
  1137. {PCI_DEVICE_ID_NCR_53C895, 0xff, "895", 6, 31, 7, 2,
  1138. FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1139. FE_RAM|FE_LCKFRQ}
  1140. ,
  1141. #endif
  1142. {PCI_DEVICE_ID_NCR_53C896, 0xff, "896", 6, 31, 7, 4,
  1143. FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1144. FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
  1145. ,
  1146. {PCI_DEVICE_ID_LSI_53C895A, 0xff, "895a", 6, 31, 7, 4,
  1147. FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1148. FE_RAM|FE_RAM8K|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
  1149. ,
  1150. {PCI_DEVICE_ID_LSI_53C875A, 0xff, "875a", 6, 31, 7, 4,
  1151. FE_WIDE|FE_ULTRA|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1152. FE_RAM|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_LCKFRQ}
  1153. ,
  1154. {PCI_DEVICE_ID_LSI_53C1010_33, 0x00, "1010-33", 6, 31, 7, 8,
  1155. FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
  1156. FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
  1157. FE_C10}
  1158. ,
  1159. {PCI_DEVICE_ID_LSI_53C1010_33, 0xff, "1010-33", 6, 31, 7, 8,
  1160. FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
  1161. FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_CRC|
  1162. FE_C10|FE_U3EN}
  1163. ,
  1164. {PCI_DEVICE_ID_LSI_53C1010_66, 0xff, "1010-66", 6, 31, 7, 8,
  1165. FE_WIDE|FE_ULTRA3|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFBC|FE_LDSTR|FE_PFEN|
  1166. FE_RAM|FE_RAM8K|FE_64BIT|FE_DAC|FE_IO256|FE_NOPM|FE_LEDC|FE_66MHZ|FE_CRC|
  1167. FE_C10|FE_U3EN}
  1168. ,
  1169. {PCI_DEVICE_ID_LSI_53C1510, 0xff, "1510d", 6, 31, 7, 4,
  1170. FE_WIDE|FE_ULTRA2|FE_QUAD|FE_CACHE_SET|FE_BOF|FE_DFS|FE_LDSTR|FE_PFEN|
  1171. FE_RAM|FE_IO256|FE_LEDC}
  1172. };
  1173. #define sym_num_devs (ARRAY_SIZE(sym_dev_table))
  1174. /*
  1175. * Look up the chip table.
  1176. *
1177. * Return a pointer to the chip entry if found,
1178. * NULL otherwise.
  1179. */
  1180. struct sym_chip *
  1181. sym_lookup_chip_table (u_short device_id, u_char revision)
  1182. {
  1183. struct sym_chip *chip;
  1184. int i;
  1185. for (i = 0; i < sym_num_devs; i++) {
  1186. chip = &sym_dev_table[i];
  1187. if (device_id != chip->device_id)
  1188. continue;
  1189. if (revision > chip->revision_id)
  1190. continue;
  1191. return chip;
  1192. }
  1193. return NULL;
  1194. }
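/*
 * Purely illustrative usage sketch (not part of this file): a PCI probe
 * routine would typically match the board against the table above along
 * these lines, 'pdev' being the struct pci_dev handed to the probe
 * function:
 *
 *	struct sym_chip *chip;
 *
 *	chip = sym_lookup_chip_table(pdev->device, pdev->revision);
 *	if (!chip)
 *		return -ENODEV;		(device/revision not supported)
 *
 * Entries are matched on an upper bound of the revision, so the first
 * entry with a matching device id and revision_id >= the actual
 * revision wins.
 */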
  1195. #if SYM_CONF_DMA_ADDRESSING_MODE == 2
  1196. /*
  1197. * Lookup the 64 bit DMA segments map.
  1198. * This is only used if the direct mapping
  1199. * has been unsuccessful.
  1200. */
  1201. int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s)
  1202. {
  1203. int i;
  1204. if (!use_dac(np))
  1205. goto weird;
  1206. /* Look up existing mappings */
  1207. for (i = SYM_DMAP_SIZE-1; i > 0; i--) {
  1208. if (h == np->dmap_bah[i])
  1209. return i;
  1210. }
  1211. /* If direct mapping is free, get it */
  1212. if (!np->dmap_bah[s])
  1213. goto new;
  1214. /* Collision -> lookup free mappings */
  1215. for (s = SYM_DMAP_SIZE-1; s > 0; s--) {
  1216. if (!np->dmap_bah[s])
  1217. goto new;
  1218. }
  1219. weird:
  1220. panic("sym: ran out of 64 bit DMA segment registers");
  1221. return -1;
  1222. new:
  1223. np->dmap_bah[s] = h;
  1224. np->dmap_dirty = 1;
  1225. return s;
  1226. }
  1227. /*
  1228. * Update IO registers scratch C..R so they will be
  1229. * in sync. with queued CCB expectations.
  1230. */
  1231. static void sym_update_dmap_regs(struct sym_hcb *np)
  1232. {
  1233. int o, i;
  1234. if (!np->dmap_dirty)
  1235. return;
  1236. o = offsetof(struct sym_reg, nc_scrx[0]);
  1237. for (i = 0; i < SYM_DMAP_SIZE; i++) {
  1238. OUTL_OFF(np, o, np->dmap_bah[i]);
  1239. o += 4;
  1240. }
  1241. np->dmap_dirty = 0;
  1242. }
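/*
 * Rough picture of how the two routines above fit together (a sketch,
 * not a specification): sym_lookup_dmap() hands back an index s such
 * that np->dmap_bah[s] holds the high 32 bits 'h' of a data buffer
 * address, and sym_update_dmap_regs() then mirrors the whole dmap_bah[]
 * array into consecutive 32 bit scratch registers starting at
 * nc_scrx[0]. The SCRIPTS can thus pick segment s to rebuild the full
 * 64 bit address for a given CCB.
 */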
  1243. #endif
  1244. /* Enforce all the fiddly SPI rules and the chip limitations */
  1245. static void sym_check_goals(struct sym_hcb *np, struct scsi_target *starget,
  1246. struct sym_trans *goal)
  1247. {
  1248. if (!spi_support_wide(starget))
  1249. goal->width = 0;
  1250. if (!spi_support_sync(starget)) {
  1251. goal->iu = 0;
  1252. goal->dt = 0;
  1253. goal->qas = 0;
  1254. goal->offset = 0;
  1255. return;
  1256. }
  1257. if (spi_support_dt(starget)) {
  1258. if (spi_support_dt_only(starget))
  1259. goal->dt = 1;
  1260. if (goal->offset == 0)
  1261. goal->dt = 0;
  1262. } else {
  1263. goal->dt = 0;
  1264. }
  1265. /* Some targets fail to properly negotiate DT in SE mode */
  1266. if ((np->scsi_mode != SMODE_LVD) || !(np->features & FE_U3EN))
  1267. goal->dt = 0;
  1268. if (goal->dt) {
  1269. /* all DT transfers must be wide */
  1270. goal->width = 1;
  1271. if (goal->offset > np->maxoffs_dt)
  1272. goal->offset = np->maxoffs_dt;
  1273. if (goal->period < np->minsync_dt)
  1274. goal->period = np->minsync_dt;
  1275. if (goal->period > np->maxsync_dt)
  1276. goal->period = np->maxsync_dt;
  1277. } else {
  1278. goal->iu = goal->qas = 0;
  1279. if (goal->offset > np->maxoffs)
  1280. goal->offset = np->maxoffs;
  1281. if (goal->period < np->minsync)
  1282. goal->period = np->minsync;
  1283. if (goal->period > np->maxsync)
  1284. goal->period = np->maxsync;
  1285. }
  1286. }
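/*
 * Worked example of the clamping above (figures illustrative only): a
 * target that reports DT-only support, sitting on an LVD bus with
 * FE_U3EN and asking for a non-zero offset, gets goal->dt forced to 1
 * and hence goal->width to 1, and its offset/period are clamped into
 * [0..maxoffs_dt] / [minsync_dt..maxsync_dt]. The same target asking
 * for offset 0, or sitting on a single-ended bus, loses DT and falls
 * back to the plain maxoffs/minsync/maxsync limits with IU and QAS
 * cleared.
 */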
  1287. /*
  1288. * Prepare the next negotiation message if needed.
  1289. *
  1290. * Fill in the part of message buffer that contains the
  1291. * negotiation and the nego_status field of the CCB.
  1292. * Returns the size of the message in bytes.
  1293. */
  1294. static int sym_prepare_nego(struct sym_hcb *np, struct sym_ccb *cp, u_char *msgptr)
  1295. {
  1296. struct sym_tcb *tp = &np->target[cp->target];
  1297. struct scsi_target *starget = tp->starget;
  1298. struct sym_trans *goal = &tp->tgoal;
  1299. int msglen = 0;
  1300. int nego;
  1301. sym_check_goals(np, starget, goal);
  1302. /*
  1303. * Many devices implement PPR in a buggy way, so only use it if we
  1304. * really want to.
  1305. */
  1306. if (goal->renego == NS_PPR || (goal->offset &&
  1307. (goal->iu || goal->dt || goal->qas || (goal->period < 0xa)))) {
  1308. nego = NS_PPR;
  1309. } else if (goal->renego == NS_WIDE || goal->width) {
  1310. nego = NS_WIDE;
  1311. } else if (goal->renego == NS_SYNC || goal->offset) {
  1312. nego = NS_SYNC;
  1313. } else {
  1314. goal->check_nego = 0;
  1315. nego = 0;
  1316. }
  1317. switch (nego) {
  1318. case NS_SYNC:
  1319. msglen += spi_populate_sync_msg(msgptr + msglen, goal->period,
  1320. goal->offset);
  1321. break;
  1322. case NS_WIDE:
  1323. msglen += spi_populate_width_msg(msgptr + msglen, goal->width);
  1324. break;
  1325. case NS_PPR:
  1326. msglen += spi_populate_ppr_msg(msgptr + msglen, goal->period,
  1327. goal->offset, goal->width,
  1328. (goal->iu ? PPR_OPT_IU : 0) |
  1329. (goal->dt ? PPR_OPT_DT : 0) |
  1330. (goal->qas ? PPR_OPT_QAS : 0));
  1331. break;
  1332. }
  1333. cp->nego_status = nego;
  1334. if (nego) {
  1335. tp->nego_cp = cp; /* Keep track a nego will be performed */
  1336. if (DEBUG_FLAGS & DEBUG_NEGO) {
  1337. sym_print_nego_msg(np, cp->target,
  1338. nego == NS_SYNC ? "sync msgout" :
  1339. nego == NS_WIDE ? "wide msgout" :
  1340. "ppr msgout", msgptr);
  1341. }
  1342. }
  1343. return msglen;
  1344. }
  1345. /*
  1346. * Insert a job into the start queue.
  1347. */
  1348. void sym_put_start_queue(struct sym_hcb *np, struct sym_ccb *cp)
  1349. {
  1350. u_short qidx;
  1351. #ifdef SYM_CONF_IARB_SUPPORT
  1352. /*
  1353. * If the previously queued CCB is not yet done,
  1354. * set the IARB hint. The SCRIPTS will go with IARB
  1355. * for this job when starting the previous one.
1356. * We give devices a chance to win arbitration by
  1357. * not using more than 'iarb_max' consecutive
  1358. * immediate arbitrations.
  1359. */
  1360. if (np->last_cp && np->iarb_count < np->iarb_max) {
  1361. np->last_cp->host_flags |= HF_HINT_IARB;
  1362. ++np->iarb_count;
  1363. }
  1364. else
  1365. np->iarb_count = 0;
  1366. np->last_cp = cp;
  1367. #endif
  1368. #if SYM_CONF_DMA_ADDRESSING_MODE == 2
  1369. /*
  1370. * Make SCRIPTS aware of the 64 bit DMA
  1371. * segment registers not being up-to-date.
  1372. */
  1373. if (np->dmap_dirty)
  1374. cp->host_xflags |= HX_DMAP_DIRTY;
  1375. #endif
  1376. /*
  1377. * Insert first the idle task and then our job.
  1378. * The MBs should ensure proper ordering.
  1379. */
  1380. qidx = np->squeueput + 2;
  1381. if (qidx >= MAX_QUEUE*2) qidx = 0;
  1382. np->squeue [qidx] = cpu_to_scr(np->idletask_ba);
  1383. MEMORY_WRITE_BARRIER();
  1384. np->squeue [np->squeueput] = cpu_to_scr(cp->ccb_ba);
  1385. np->squeueput = qidx;
  1386. if (DEBUG_FLAGS & DEBUG_QUEUE)
  1387. scmd_printk(KERN_DEBUG, cp->cmd, "queuepos=%d\n",
  1388. np->squeueput);
  1389. /*
  1390. * Script processor may be waiting for reselect.
  1391. * Wake it up.
  1392. */
  1393. MEMORY_WRITE_BARRIER();
  1394. OUTB(np, nc_istat, SIGP|np->istat_sem);
  1395. }
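/*
 * Shape of the start queue the routine above relies on (descriptive
 * sketch only): np->squeue[] holds MAX_QUEUE entries of two 32 bit
 * words each -- the bus address of a CCB (or of the idle task) and a
 * jump to the next entry, the last jump closing the ring. Insertion
 * therefore writes the idle task into the following entry as the new
 * end marker, issues a write barrier, then patches the current entry
 * with the CCB address and advances squeueput, so the SCRIPTS processor
 * never sees a partially built entry.
 */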
  1396. #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
  1397. /*
  1398. * Start next ready-to-start CCBs.
  1399. */
  1400. void sym_start_next_ccbs(struct sym_hcb *np, struct sym_lcb *lp, int maxn)
  1401. {
  1402. SYM_QUEHEAD *qp;
  1403. struct sym_ccb *cp;
  1404. /*
  1405. * Paranoia, as usual. :-)
  1406. */
  1407. assert(!lp->started_tags || !lp->started_no_tag);
  1408. /*
  1409. * Try to start as many commands as asked by caller.
  1410. * Prevent from having both tagged and untagged
  1411. * commands queued to the device at the same time.
  1412. */
  1413. while (maxn--) {
  1414. qp = sym_remque_head(&lp->waiting_ccbq);
  1415. if (!qp)
  1416. break;
  1417. cp = sym_que_entry(qp, struct sym_ccb, link2_ccbq);
  1418. if (cp->tag != NO_TAG) {
  1419. if (lp->started_no_tag ||
  1420. lp->started_tags >= lp->started_max) {
  1421. sym_insque_head(qp, &lp->waiting_ccbq);
  1422. break;
  1423. }
  1424. lp->itlq_tbl[cp->tag] = cpu_to_scr(cp->ccb_ba);
  1425. lp->head.resel_sa =
  1426. cpu_to_scr(SCRIPTA_BA(np, resel_tag));
  1427. ++lp->started_tags;
  1428. } else {
  1429. if (lp->started_no_tag || lp->started_tags) {
  1430. sym_insque_head(qp, &lp->waiting_ccbq);
  1431. break;
  1432. }
  1433. lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
  1434. lp->head.resel_sa =
  1435. cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
  1436. ++lp->started_no_tag;
  1437. }
  1438. cp->started = 1;
  1439. sym_insque_tail(qp, &lp->started_ccbq);
  1440. sym_put_start_queue(np, cp);
  1441. }
  1442. }
  1443. #endif /* SYM_OPT_HANDLE_DEVICE_QUEUEING */
  1444. /*
  1445. * The chip may have completed jobs. Look at the DONE QUEUE.
  1446. *
  1447. * On paper, memory read barriers may be needed here to
  1448. * prevent out of order LOADs by the CPU from having
  1449. * prefetched stale data prior to DMA having occurred.
  1450. */
  1451. static int sym_wakeup_done (struct sym_hcb *np)
  1452. {
  1453. struct sym_ccb *cp;
  1454. int i, n;
  1455. u32 dsa;
  1456. n = 0;
  1457. i = np->dqueueget;
  1458. /* MEMORY_READ_BARRIER(); */
  1459. while (1) {
  1460. dsa = scr_to_cpu(np->dqueue[i]);
  1461. if (!dsa)
  1462. break;
  1463. np->dqueue[i] = 0;
  1464. if ((i = i+2) >= MAX_QUEUE*2)
  1465. i = 0;
  1466. cp = sym_ccb_from_dsa(np, dsa);
  1467. if (cp) {
  1468. MEMORY_READ_BARRIER();
  1469. sym_complete_ok (np, cp);
  1470. ++n;
  1471. }
  1472. else
  1473. printf ("%s: bad DSA (%x) in done queue.\n",
  1474. sym_name(np), (u_int) dsa);
  1475. }
  1476. np->dqueueget = i;
  1477. return n;
  1478. }
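/*
 * Done queue layout assumed above (sketch): even slots of np->dqueue[]
 * receive the DSA of a completed CCB (zero when empty), odd slots hold
 * the jump addresses that chain the ring. The scan starts at dqueueget,
 * stops at the first zero entry, clears each consumed slot so it can be
 * reused, and wraps the index at MAX_QUEUE*2.
 */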
  1479. /*
  1480. * Complete all CCBs queued to the COMP queue.
  1481. *
  1482. * These CCBs are assumed:
  1483. * - Not to be referenced either by devices or
1484. * SCRIPTS-related queues and data.
1485. * - To be completed with an error condition
  1486. * or requeued.
  1487. *
  1488. * The device queue freeze count is incremented
  1489. * for each CCB that does not prevent this.
  1490. * This function is called when all CCBs involved
  1491. * in error handling/recovery have been reaped.
  1492. */
  1493. static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
  1494. {
  1495. SYM_QUEHEAD *qp;
  1496. struct sym_ccb *cp;
  1497. while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
  1498. struct scsi_cmnd *cmd;
  1499. cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
  1500. sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
  1501. /* Leave quiet CCBs waiting for resources */
  1502. if (cp->host_status == HS_WAIT)
  1503. continue;
  1504. cmd = cp->cmd;
  1505. if (cam_status)
  1506. sym_set_cam_status(cmd, cam_status);
  1507. #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
  1508. if (sym_get_cam_status(cmd) == DID_SOFT_ERROR) {
  1509. struct sym_tcb *tp = &np->target[cp->target];
  1510. struct sym_lcb *lp = sym_lp(tp, cp->lun);
  1511. if (lp) {
  1512. sym_remque(&cp->link2_ccbq);
  1513. sym_insque_tail(&cp->link2_ccbq,
  1514. &lp->waiting_ccbq);
  1515. if (cp->started) {
  1516. if (cp->tag != NO_TAG)
  1517. --lp->started_tags;
  1518. else
  1519. --lp->started_no_tag;
  1520. }
  1521. }
  1522. cp->started = 0;
  1523. continue;
  1524. }
  1525. #endif
  1526. sym_free_ccb(np, cp);
  1527. sym_xpt_done(np, cmd);
  1528. }
  1529. }
  1530. /*
  1531. * Complete all active CCBs with error.
  1532. * Used on CHIP/SCSI RESET.
  1533. */
  1534. static void sym_flush_busy_queue (struct sym_hcb *np, int cam_status)
  1535. {
  1536. /*
  1537. * Move all active CCBs to the COMP queue
  1538. * and flush this queue.
  1539. */
  1540. sym_que_splice(&np->busy_ccbq, &np->comp_ccbq);
  1541. sym_que_init(&np->busy_ccbq);
  1542. sym_flush_comp_queue(np, cam_status);
  1543. }
  1544. /*
  1545. * Start chip.
  1546. *
  1547. * 'reason' means:
  1548. * 0: initialisation.
  1549. * 1: SCSI BUS RESET delivered or received.
  1550. * 2: SCSI BUS MODE changed.
  1551. */
  1552. void sym_start_up(struct Scsi_Host *shost, int reason)
  1553. {
  1554. struct sym_data *sym_data = shost_priv(shost);
  1555. struct pci_dev *pdev = sym_data->pdev;
  1556. struct sym_hcb *np = sym_data->ncb;
  1557. int i;
  1558. u32 phys;
  1559. /*
  1560. * Reset chip if asked, otherwise just clear fifos.
  1561. */
  1562. if (reason == 1)
  1563. sym_soft_reset(np);
  1564. else {
  1565. OUTB(np, nc_stest3, TE|CSF);
  1566. OUTONB(np, nc_ctest3, CLF);
  1567. }
  1568. /*
  1569. * Clear Start Queue
  1570. */
  1571. phys = np->squeue_ba;
  1572. for (i = 0; i < MAX_QUEUE*2; i += 2) {
  1573. np->squeue[i] = cpu_to_scr(np->idletask_ba);
  1574. np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
  1575. }
  1576. np->squeue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
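/*
 * The last jump entry points back to the base address, turning the
 * start queue into a ring; the done queue below is chained the same way.
 */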
  1577. /*
  1578. * Start at first entry.
  1579. */
  1580. np->squeueput = 0;
  1581. /*
  1582. * Clear Done Queue
  1583. */
  1584. phys = np->dqueue_ba;
  1585. for (i = 0; i < MAX_QUEUE*2; i += 2) {
  1586. np->dqueue[i] = 0;
  1587. np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
  1588. }
  1589. np->dqueue[MAX_QUEUE*2-1] = cpu_to_scr(phys);
  1590. /*
  1591. * Start at first entry.
  1592. */
  1593. np->dqueueget = 0;
  1594. /*
  1595. * Install patches in scripts.
1596. * This also makes the start and done queue pointers
1597. * used from SCRIPTS point to their first entry.
  1598. */
  1599. np->fw_patch(shost);
  1600. /*
  1601. * Wakeup all pending jobs.
  1602. */
  1603. sym_flush_busy_queue(np, DID_RESET);
  1604. /*
  1605. * Init chip.
  1606. */
  1607. OUTB(np, nc_istat, 0x00); /* Remove Reset, abort */
  1608. INB(np, nc_mbox1);
  1609. udelay(2000); /* The 895 needs time for the bus mode to settle */
  1610. OUTB(np, nc_scntl0, np->rv_scntl0 | 0xc0);
  1611. /* full arb., ena parity, par->ATN */
  1612. OUTB(np, nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
  1613. sym_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
  1614. OUTB(np, nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
  1615. OUTW(np, nc_respid, 1ul<<np->myaddr); /* Id to respond to */
  1616. OUTB(np, nc_istat , SIGP ); /* Signal Process */
  1617. OUTB(np, nc_dmode , np->rv_dmode); /* Burst length, dma mode */
  1618. OUTB(np, nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
  1619. OUTB(np, nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
  1620. OUTB(np, nc_ctest3, np->rv_ctest3); /* Write and invalidate */
  1621. OUTB(np, nc_ctest4, np->rv_ctest4); /* Master parity checking */
  1622. /* Extended Sreq/Sack filtering not supported on the C10 */
  1623. if (np->features & FE_C10)
  1624. OUTB(np, nc_stest2, np->rv_stest2);
  1625. else
  1626. OUTB(np, nc_stest2, EXT|np->rv_stest2);
  1627. OUTB(np, nc_stest3, TE); /* TolerANT enable */
  1628. OUTB(np, nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
  1629. /*
  1630. * For now, disable AIP generation on C1010-66.
  1631. */
  1632. if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_66)
  1633. OUTB(np, nc_aipcntl1, DISAIP);
  1634. /*
1635. * C1010-33 rev. 0 errata.
  1636. * Errant SGE's when in narrow. Write bits 4 & 5 of
  1637. * STEST1 register to disable SGE. We probably should do
  1638. * that from SCRIPTS for each selection/reselection, but
1639. * I just don't want to. :)
  1640. */
  1641. if (pdev->device == PCI_DEVICE_ID_LSI_53C1010_33 &&
  1642. pdev->revision < 1)
  1643. OUTB(np, nc_stest1, INB(np, nc_stest1) | 0x30);
  1644. /*
  1645. * DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2.
  1646. * Disable overlapped arbitration for some dual function devices,
1647. * regardless of revision id (kind of post-chip-design feature. ;-))
  1648. */
  1649. if (pdev->device == PCI_DEVICE_ID_NCR_53C875)
  1650. OUTB(np, nc_ctest0, (1<<5));
  1651. else if (pdev->device == PCI_DEVICE_ID_NCR_53C896)
  1652. np->rv_ccntl0 |= DPR;
  1653. /*
  1654. * Write CCNTL0/CCNTL1 for chips capable of 64 bit addressing
  1655. * and/or hardware phase mismatch, since only such chips
  1656. * seem to support those IO registers.
  1657. */
  1658. if (np->features & (FE_DAC|FE_NOPM)) {
  1659. OUTB(np, nc_ccntl0, np->rv_ccntl0);
  1660. OUTB(np, nc_ccntl1, np->rv_ccntl1);
  1661. }
  1662. #if SYM_CONF_DMA_ADDRESSING_MODE == 2
  1663. /*
  1664. * Set up scratch C and DRS IO registers to map the 32 bit
  1665. * DMA address range our data structures are located in.
  1666. */
  1667. if (use_dac(np)) {
  1668. np->dmap_bah[0] = 0; /* ??? */
  1669. OUTL(np, nc_scrx[0], np->dmap_bah[0]);
  1670. OUTL(np, nc_drs, np->dmap_bah[0]);
  1671. }
  1672. #endif
  1673. /*
  1674. * If phase mismatch handled by scripts (895A/896/1010),
  1675. * set PM jump addresses.
  1676. */
  1677. if (np->features & FE_NOPM) {
  1678. OUTL(np, nc_pmjad1, SCRIPTB_BA(np, pm_handle));
  1679. OUTL(np, nc_pmjad2, SCRIPTB_BA(np, pm_handle));
  1680. }
  1681. /*
  1682. * Enable GPIO0 pin for writing if LED support from SCRIPTS.
  1683. * Also set GPIO5 and clear GPIO6 if hardware LED control.
  1684. */
  1685. if (np->features & FE_LED0)
  1686. OUTB(np, nc_gpcntl, INB(np, nc_gpcntl) & ~0x01);
  1687. else if (np->features & FE_LEDC)
  1688. OUTB(np, nc_gpcntl, (INB(np, nc_gpcntl) & ~0x41) | 0x20);
  1689. /*
  1690. * enable ints
  1691. */
  1692. OUTW(np, nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
  1693. OUTB(np, nc_dien , MDPE|BF|SSI|SIR|IID);
  1694. /*
  1695. * For 895/6 enable SBMC interrupt and save current SCSI bus mode.
  1696. * Try to eat the spurious SBMC interrupt that may occur when
  1697. * we reset the chip but not the SCSI BUS (at initialization).
  1698. */
  1699. if (np->features & (FE_ULTRA2|FE_ULTRA3)) {
  1700. OUTONW(np, nc_sien, SBMC);
  1701. if (reason == 0) {
  1702. INB(np, nc_mbox1);
  1703. mdelay(100);
  1704. INW(np, nc_sist);
  1705. }
  1706. np->scsi_mode = INB(np, nc_stest4) & SMODE;
  1707. }
  1708. /*
  1709. * Fill in target structure.
  1710. * Reinitialize usrsync.
  1711. * Reinitialize usrwide.
  1712. * Prepare sync negotiation according to actual SCSI bus mode.
  1713. */
  1714. for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
  1715. struct sym_tcb *tp = &np->target[i];
  1716. tp->to_reset = 0;
  1717. tp->head.sval = 0;
  1718. tp->head.wval = np->rv_scntl3;
  1719. tp->head.uval = 0;
  1720. if (tp->lun0p)
  1721. tp->lun0p->to_clear = 0;
  1722. if (tp->lunmp) {
  1723. int ln;
  1724. for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
  1725. if (tp->lunmp[ln])
  1726. tp->lunmp[ln]->to_clear = 0;
  1727. }
  1728. }
  1729. /*
  1730. * Download SCSI SCRIPTS to on-chip RAM if present,
  1731. * and start script processor.
1732. * We do the download preferably from the CPU.
  1733. * For platforms that may not support PCI memory mapping,
  1734. * we use simple SCRIPTS that performs MEMORY MOVEs.
  1735. */
  1736. phys = SCRIPTA_BA(np, init);
  1737. if (np->ram_ba) {
  1738. if (sym_verbose >= 2)
  1739. printf("%s: Downloading SCSI SCRIPTS.\n", sym_name(np));
  1740. memcpy_toio(np->s.ramaddr, np->scripta0, np->scripta_sz);
  1741. if (np->features & FE_RAM8K) {
  1742. memcpy_toio(np->s.ramaddr + 4096, np->scriptb0, np->scriptb_sz);
  1743. phys = scr_to_cpu(np->scr_ram_seg);
  1744. OUTL(np, nc_mmws, phys);
  1745. OUTL(np, nc_mmrs, phys);
  1746. OUTL(np, nc_sfs, phys);
  1747. phys = SCRIPTB_BA(np, start64);
  1748. }
  1749. }
  1750. np->istat_sem = 0;
  1751. OUTL(np, nc_dsa, np->hcb_ba);
  1752. OUTL_DSP(np, phys);
  1753. /*
  1754. * Notify the XPT about the RESET condition.
  1755. */
  1756. if (reason != 0)
  1757. sym_xpt_async_bus_reset(np);
  1758. }
  1759. /*
  1760. * Switch trans mode for current job and its target.
  1761. */
  1762. static void sym_settrans(struct sym_hcb *np, int target, u_char opts, u_char ofs,
  1763. u_char per, u_char wide, u_char div, u_char fak)
  1764. {
  1765. SYM_QUEHEAD *qp;
  1766. u_char sval, wval, uval;
  1767. struct sym_tcb *tp = &np->target[target];
  1768. assert(target == (INB(np, nc_sdid) & 0x0f));
  1769. sval = tp->head.sval;
  1770. wval = tp->head.wval;
  1771. uval = tp->head.uval;
  1772. #if 0
  1773. printf("XXXX sval=%x wval=%x uval=%x (%x)\n",
  1774. sval, wval, uval, np->rv_scntl3);
  1775. #endif
  1776. /*
  1777. * Set the offset.
  1778. */
  1779. if (!(np->features & FE_C10))
  1780. sval = (sval & ~0x1f) | ofs;
  1781. else
  1782. sval = (sval & ~0x3f) | ofs;
  1783. /*
  1784. * Set the sync divisor and extra clock factor.
  1785. */
  1786. if (ofs != 0) {
  1787. wval = (wval & ~0x70) | ((div+1) << 4);
  1788. if (!(np->features & FE_C10))
  1789. sval = (sval & ~0xe0) | (fak << 5);
  1790. else {
  1791. uval = uval & ~(XCLKH_ST|XCLKH_DT|XCLKS_ST|XCLKS_DT);
  1792. if (fak >= 1) uval |= (XCLKH_ST|XCLKH_DT);
  1793. if (fak >= 2) uval |= (XCLKS_ST|XCLKS_DT);
  1794. }
  1795. }
  1796. /*
  1797. * Set the bus width.
  1798. */
  1799. wval = wval & ~EWS;
  1800. if (wide != 0)
  1801. wval |= EWS;
  1802. /*
  1803. * Set misc. ultra enable bits.
  1804. */
  1805. if (np->features & FE_C10) {
  1806. uval = uval & ~(U3EN|AIPCKEN);
  1807. if (opts) {
  1808. assert(np->features & FE_U3EN);
  1809. uval |= U3EN;
  1810. }
  1811. } else {
  1812. wval = wval & ~ULTRA;
  1813. if (per <= 12) wval |= ULTRA;
  1814. }
  1815. /*
  1816. * Stop there if sync parameters are unchanged.
  1817. */
  1818. if (tp->head.sval == sval &&
  1819. tp->head.wval == wval &&
  1820. tp->head.uval == uval)
  1821. return;
  1822. tp->head.sval = sval;
  1823. tp->head.wval = wval;
  1824. tp->head.uval = uval;
  1825. /*
  1826. * Disable extended Sreq/Sack filtering if per < 50.
  1827. * Not supported on the C1010.
  1828. */
  1829. if (per < 50 && !(np->features & FE_C10))
  1830. OUTOFFB(np, nc_stest2, EXT);
  1831. /*
  1832. * set actual value and sync_status
  1833. */
  1834. OUTB(np, nc_sxfer, tp->head.sval);
  1835. OUTB(np, nc_scntl3, tp->head.wval);
  1836. if (np->features & FE_C10) {
  1837. OUTB(np, nc_scntl4, tp->head.uval);
  1838. }
  1839. /*
  1840. * patch ALL busy ccbs of this target.
  1841. */
  1842. FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
  1843. struct sym_ccb *cp;
  1844. cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
  1845. if (cp->target != target)
  1846. continue;
  1847. cp->phys.select.sel_scntl3 = tp->head.wval;
  1848. cp->phys.select.sel_sxfer = tp->head.sval;
  1849. if (np->features & FE_C10) {
  1850. cp->phys.select.sel_scntl4 = tp->head.uval;
  1851. }
  1852. }
  1853. }
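/*
 * Numeric example of the packing above (illustrative values only): on a
 * pre-C10 chip, ofs=8, div=1, fak=0 and wide=1 would yield
 * sval = (sval & ~0x1f & ~0xe0) | 8 (offset 8, no extra clocks) and
 * wval = (wval & ~0x70) | 0x20 | EWS (sync divisor field 2, wide bus);
 * a period <= 12 would additionally set the ULTRA bit in wval.
 */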
  1854. static void sym_announce_transfer_rate(struct sym_tcb *tp)
  1855. {
  1856. struct scsi_target *starget = tp->starget;
  1857. if (tp->tprint.period != spi_period(starget) ||
  1858. tp->tprint.offset != spi_offset(starget) ||
  1859. tp->tprint.width != spi_width(starget) ||
  1860. tp->tprint.iu != spi_iu(starget) ||
  1861. tp->tprint.dt != spi_dt(starget) ||
  1862. tp->tprint.qas != spi_qas(starget) ||
  1863. !tp->tprint.check_nego) {
  1864. tp->tprint.period = spi_period(starget);
  1865. tp->tprint.offset = spi_offset(starget);
  1866. tp->tprint.width = spi_width(starget);
  1867. tp->tprint.iu = spi_iu(starget);
  1868. tp->tprint.dt = spi_dt(starget);
  1869. tp->tprint.qas = spi_qas(starget);
  1870. tp->tprint.check_nego = 1;
  1871. spi_display_xfer_agreement(starget);
  1872. }
  1873. }
  1874. /*
  1875. * We received a WDTR.
  1876. * Let everything be aware of the changes.
  1877. */
  1878. static void sym_setwide(struct sym_hcb *np, int target, u_char wide)
  1879. {
  1880. struct sym_tcb *tp = &np->target[target];
  1881. struct scsi_target *starget = tp->starget;
  1882. sym_settrans(np, target, 0, 0, 0, wide, 0, 0);
  1883. if (wide)
  1884. tp->tgoal.renego = NS_WIDE;
  1885. else
  1886. tp->tgoal.renego = 0;
  1887. tp->tgoal.check_nego = 0;
  1888. tp->tgoal.width = wide;
  1889. spi_offset(starget) = 0;
  1890. spi_period(starget) = 0;
  1891. spi_width(starget) = wide;
  1892. spi_iu(starget) = 0;
  1893. spi_dt(starget) = 0;
  1894. spi_qas(starget) = 0;
  1895. if (sym_verbose >= 3)
  1896. sym_announce_transfer_rate(tp);
  1897. }
  1898. /*
  1899. * We received a SDTR.
  1900. * Let everything be aware of the changes.
  1901. */
  1902. static void
  1903. sym_setsync(struct sym_hcb *np, int target,
  1904. u_char ofs, u_char per, u_char div, u_char fak)
  1905. {
  1906. struct sym_tcb *tp = &np->target[target];
  1907. struct scsi_target *starget = tp->starget;
  1908. u_char wide = (tp->head.wval & EWS) ? BUS_16_BIT : BUS_8_BIT;
  1909. sym_settrans(np, target, 0, ofs, per, wide, div, fak);
  1910. if (wide)
  1911. tp->tgoal.renego = NS_WIDE;
  1912. else if (ofs)
  1913. tp->tgoal.renego = NS_SYNC;
  1914. else
  1915. tp->tgoal.renego = 0;
  1916. spi_period(starget) = per;
  1917. spi_offset(starget) = ofs;
  1918. spi_iu(starget) = spi_dt(starget) = spi_qas(starget) = 0;
  1919. if (!tp->tgoal.dt && !tp->tgoal.iu && !tp->tgoal.qas) {
  1920. tp->tgoal.period = per;
  1921. tp->tgoal.offset = ofs;
  1922. tp->tgoal.check_nego = 0;
  1923. }
  1924. sym_announce_transfer_rate(tp);
  1925. }
  1926. /*
  1927. * We received a PPR.
  1928. * Let everything be aware of the changes.
  1929. */
  1930. static void
  1931. sym_setpprot(struct sym_hcb *np, int target, u_char opts, u_char ofs,
  1932. u_char per, u_char wide, u_char div, u_char fak)
  1933. {
  1934. struct sym_tcb *tp = &np->target[target];
  1935. struct scsi_target *starget = tp->starget;
  1936. sym_settrans(np, target, opts, ofs, per, wide, div, fak);
  1937. if (wide || ofs)
  1938. tp->tgoal.renego = NS_PPR;
  1939. else
  1940. tp->tgoal.renego = 0;
  1941. spi_width(starget) = tp->tgoal.width = wide;
  1942. spi_period(starget) = tp->tgoal.period = per;
  1943. spi_offset(starget) = tp->tgoal.offset = ofs;
  1944. spi_iu(starget) = tp->tgoal.iu = !!(opts & PPR_OPT_IU);
  1945. spi_dt(starget) = tp->tgoal.dt = !!(opts & PPR_OPT_DT);
  1946. spi_qas(starget) = tp->tgoal.qas = !!(opts & PPR_OPT_QAS);
  1947. tp->tgoal.check_nego = 0;
  1948. sym_announce_transfer_rate(tp);
  1949. }
  1950. /*
  1951. * generic recovery from scsi interrupt
  1952. *
  1953. * The doc says that when the chip gets an SCSI interrupt,
  1954. * it tries to stop in an orderly fashion, by completing
  1955. * an instruction fetch that had started or by flushing
  1956. * the DMA fifo for a write to memory that was executing.
  1957. * Such a fashion is not enough to know if the instruction
  1958. * that was just before the current DSP value has been
  1959. * executed or not.
  1960. *
  1961. * There are some small SCRIPTS sections that deal with
  1962. * the start queue and the done queue that may break any
1963. * assumption from the C code if we are interrupted
  1964. * inside, so we reset if this happens. Btw, since these
  1965. * SCRIPTS sections are executed while the SCRIPTS hasn't
  1966. * started SCSI operations, it is very unlikely to happen.
  1967. *
  1968. * All the driver data structures are supposed to be
  1969. * allocated from the same 4 GB memory window, so there
  1970. * is a 1 to 1 relationship between DSA and driver data
  1971. * structures. Since we are careful :) to invalidate the
  1972. * DSA when we complete a command or when the SCRIPTS
  1973. * pushes a DSA into a queue, we can trust it when it
  1974. * points to a CCB.
  1975. */
  1976. static void sym_recover_scsi_int (struct sym_hcb *np, u_char hsts)
  1977. {
  1978. u32 dsp = INL(np, nc_dsp);
  1979. u32 dsa = INL(np, nc_dsa);
  1980. struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
  1981. /*
  1982. * If we haven't been interrupted inside the SCRIPTS
  1983. * critical pathes, we can safely restart the SCRIPTS
  1984. * and trust the DSA value if it matches a CCB.
  1985. */
  1986. if ((!(dsp > SCRIPTA_BA(np, getjob_begin) &&
  1987. dsp < SCRIPTA_BA(np, getjob_end) + 1)) &&
  1988. (!(dsp > SCRIPTA_BA(np, ungetjob) &&
  1989. dsp < SCRIPTA_BA(np, reselect) + 1)) &&
  1990. (!(dsp > SCRIPTB_BA(np, sel_for_abort) &&
  1991. dsp < SCRIPTB_BA(np, sel_for_abort_1) + 1)) &&
  1992. (!(dsp > SCRIPTA_BA(np, done) &&
  1993. dsp < SCRIPTA_BA(np, done_end) + 1))) {
  1994. OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
  1995. OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */
  1996. /*
  1997. * If we have a CCB, let the SCRIPTS call us back for
  1998. * the handling of the error with SCRATCHA filled with
  1999. * STARTPOS. This way, we will be able to freeze the
  2000. * device queue and requeue awaiting IOs.
  2001. */
  2002. if (cp) {
  2003. cp->host_status = hsts;
  2004. OUTL_DSP(np, SCRIPTA_BA(np, complete_error));
  2005. }
  2006. /*
  2007. * Otherwise just restart the SCRIPTS.
  2008. */
  2009. else {
  2010. OUTL(np, nc_dsa, 0xffffff);
  2011. OUTL_DSP(np, SCRIPTA_BA(np, start));
  2012. }
  2013. }
  2014. else
  2015. goto reset_all;
  2016. return;
  2017. reset_all:
  2018. sym_start_reset(np);
  2019. }
  2020. /*
  2021. * chip exception handler for selection timeout
  2022. */
  2023. static void sym_int_sto (struct sym_hcb *np)
  2024. {
  2025. u32 dsp = INL(np, nc_dsp);
  2026. if (DEBUG_FLAGS & DEBUG_TINY) printf ("T");
  2027. if (dsp == SCRIPTA_BA(np, wf_sel_done) + 8)
  2028. sym_recover_scsi_int(np, HS_SEL_TIMEOUT);
  2029. else
  2030. sym_start_reset(np);
  2031. }
  2032. /*
  2033. * chip exception handler for unexpected disconnect
  2034. */
  2035. static void sym_int_udc (struct sym_hcb *np)
  2036. {
  2037. printf ("%s: unexpected disconnect\n", sym_name(np));
  2038. sym_recover_scsi_int(np, HS_UNEXPECTED);
  2039. }
  2040. /*
  2041. * chip exception handler for SCSI bus mode change
  2042. *
  2043. * spi2-r12 11.2.3 says a transceiver mode change must
  2044. * generate a reset event and a device that detects a reset
  2045. * event shall initiate a hard reset. It says also that a
  2046. * device that detects a mode change shall set data transfer
  2047. * mode to eight bit asynchronous, etc...
2048. * So, just reinitializing all except the chip should be enough.
  2049. */
  2050. static void sym_int_sbmc(struct Scsi_Host *shost)
  2051. {
  2052. struct sym_hcb *np = sym_get_hcb(shost);
  2053. u_char scsi_mode = INB(np, nc_stest4) & SMODE;
  2054. /*
  2055. * Notify user.
  2056. */
  2057. printf("%s: SCSI BUS mode change from %s to %s.\n", sym_name(np),
  2058. sym_scsi_bus_mode(np->scsi_mode), sym_scsi_bus_mode(scsi_mode));
  2059. /*
  2060. * Should suspend command processing for a few seconds and
  2061. * reinitialize all except the chip.
  2062. */
  2063. sym_start_up(shost, 2);
  2064. }
  2065. /*
  2066. * chip exception handler for SCSI parity error.
  2067. *
  2068. * When the chip detects a SCSI parity error and is
  2069. * currently executing a (CH)MOV instruction, it does
  2070. * not interrupt immediately, but tries to finish the
  2071. * transfer of the current scatter entry before
  2072. * interrupting. The following situations may occur:
  2073. *
  2074. * - The complete scatter entry has been transferred
  2075. * without the device having changed phase.
  2076. * The chip will then interrupt with the DSP pointing
  2077. * to the instruction that follows the MOV.
  2078. *
  2079. * - A phase mismatch occurs before the MOV finished
  2080. * and phase errors are to be handled by the C code.
  2081. * The chip will then interrupt with both PAR and MA
  2082. * conditions set.
  2083. *
  2084. * - A phase mismatch occurs before the MOV finished and
  2085. * phase errors are to be handled by SCRIPTS.
  2086. * The chip will load the DSP with the phase mismatch
  2087. * JUMP address and interrupt the host processor.
  2088. */
  2089. static void sym_int_par (struct sym_hcb *np, u_short sist)
  2090. {
  2091. u_char hsts = INB(np, HS_PRT);
  2092. u32 dsp = INL(np, nc_dsp);
  2093. u32 dbc = INL(np, nc_dbc);
  2094. u32 dsa = INL(np, nc_dsa);
  2095. u_char sbcl = INB(np, nc_sbcl);
  2096. u_char cmd = dbc >> 24;
  2097. int phase = cmd & 7;
  2098. struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
  2099. if (printk_ratelimit())
  2100. printf("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n",
  2101. sym_name(np), hsts, dbc, sbcl);
  2102. /*
  2103. * Check that the chip is connected to the SCSI BUS.
  2104. */
  2105. if (!(INB(np, nc_scntl1) & ISCON)) {
  2106. sym_recover_scsi_int(np, HS_UNEXPECTED);
  2107. return;
  2108. }
  2109. /*
  2110. * If the nexus is not clearly identified, reset the bus.
  2111. * We will try to do better later.
  2112. */
  2113. if (!cp)
  2114. goto reset_all;
  2115. /*
  2116. * Check instruction was a MOV, direction was INPUT and
  2117. * ATN is asserted.
  2118. */
  2119. if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8))
  2120. goto reset_all;
  2121. /*
  2122. * Keep track of the parity error.
  2123. */
  2124. OUTONB(np, HF_PRT, HF_EXT_ERR);
  2125. cp->xerr_status |= XE_PARITY_ERR;
  2126. /*
  2127. * Prepare the message to send to the device.
  2128. */
  2129. np->msgout[0] = (phase == 7) ? M_PARITY : M_ID_ERROR;
  2130. /*
  2131. * If the old phase was DATA IN phase, we have to deal with
  2132. * the 3 situations described above.
  2133. * For other input phases (MSG IN and STATUS), the device
  2134. * must resend the whole thing that failed parity checking
  2135. * or signal error. So, jumping to dispatcher should be OK.
  2136. */
  2137. if (phase == 1 || phase == 5) {
  2138. /* Phase mismatch handled by SCRIPTS */
  2139. if (dsp == SCRIPTB_BA(np, pm_handle))
  2140. OUTL_DSP(np, dsp);
  2141. /* Phase mismatch handled by the C code */
  2142. else if (sist & MA)
  2143. sym_int_ma (np);
  2144. /* No phase mismatch occurred */
  2145. else {
  2146. sym_set_script_dp (np, cp, dsp);
  2147. OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
  2148. }
  2149. }
  2150. else if (phase == 7) /* We definitely cannot handle parity errors */
2151. #if 1 /* in message-in phase due to the reselection */
  2152. goto reset_all; /* path and various message anticipations. */
  2153. #else
  2154. OUTL_DSP(np, SCRIPTA_BA(np, clrack));
  2155. #endif
  2156. else
  2157. OUTL_DSP(np, SCRIPTA_BA(np, dispatch));
  2158. return;
  2159. reset_all:
  2160. sym_start_reset(np);
  2161. return;
  2162. }
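/*
 * Mapping of the three DATA IN situations described before this routine
 * to the code paths above (descriptive only): DSP already pointing at
 * pm_handle means SCRIPTS has taken care of the phase mismatch and is
 * simply restarted; PAR combined with MA means the C code handles the
 * mismatch through sym_int_ma(); neither means the MOV completed, so
 * the saved data pointer is set to DSP and SCRIPTS resumes at the
 * dispatcher.
 */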
  2163. /*
  2164. * chip exception handler for phase errors.
  2165. *
  2166. * We have to construct a new transfer descriptor,
  2167. * to transfer the rest of the current block.
  2168. */
  2169. static void sym_int_ma (struct sym_hcb *np)
  2170. {
  2171. u32 dbc;
  2172. u32 rest;
  2173. u32 dsp;
  2174. u32 dsa;
  2175. u32 nxtdsp;
  2176. u32 *vdsp;
  2177. u32 oadr, olen;
  2178. u32 *tblp;
  2179. u32 newcmd;
  2180. u_int delta;
  2181. u_char cmd;
  2182. u_char hflags, hflags0;
  2183. struct sym_pmc *pm;
  2184. struct sym_ccb *cp;
  2185. dsp = INL(np, nc_dsp);
  2186. dbc = INL(np, nc_dbc);
  2187. dsa = INL(np, nc_dsa);
  2188. cmd = dbc >> 24;
  2189. rest = dbc & 0xffffff;
  2190. delta = 0;
  2191. /*
  2192. * locate matching cp if any.
  2193. */
  2194. cp = sym_ccb_from_dsa(np, dsa);
  2195. /*
2196. * Do not take into account the DMA fifo and various buffers in
  2197. * INPUT phase since the chip flushes everything before
  2198. * raising the MA interrupt for interrupted INPUT phases.
  2199. * For DATA IN phase, we will check for the SWIDE later.
  2200. */
  2201. if ((cmd & 7) != 1 && (cmd & 7) != 5) {
  2202. u_char ss0, ss2;
  2203. if (np->features & FE_DFBC)
  2204. delta = INW(np, nc_dfbc);
  2205. else {
  2206. u32 dfifo;
  2207. /*
  2208. * Read DFIFO, CTEST[4-6] using 1 PCI bus ownership.
  2209. */
  2210. dfifo = INL(np, nc_dfifo);
  2211. /*
  2212. * Calculate remaining bytes in DMA fifo.
  2213. * (CTEST5 = dfifo >> 16)
  2214. */
  2215. if (dfifo & (DFS << 16))
  2216. delta = ((((dfifo >> 8) & 0x300) |
  2217. (dfifo & 0xff)) - rest) & 0x3ff;
  2218. else
  2219. delta = ((dfifo & 0xff) - rest) & 0x7f;
  2220. }
  2221. /*
  2222. * The data in the dma fifo has not been transferred to
  2223. * the target -> add the amount to the rest
  2224. * and clear the data.
  2225. * Check the sstat2 register in case of wide transfer.
  2226. */
  2227. rest += delta;
  2228. ss0 = INB(np, nc_sstat0);
  2229. if (ss0 & OLF) rest++;
  2230. if (!(np->features & FE_C10))
  2231. if (ss0 & ORF) rest++;
  2232. if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
  2233. ss2 = INB(np, nc_sstat2);
  2234. if (ss2 & OLF1) rest++;
  2235. if (!(np->features & FE_C10))
  2236. if (ss2 & ORF1) rest++;
  2237. }
  2238. /*
  2239. * Clear fifos.
  2240. */
  2241. OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
  2242. OUTB(np, nc_stest3, TE|CSF); /* scsi fifo */
  2243. }
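/*
 * Worked example of the residue computation above (numbers purely
 * illustrative): on a chip without FE_DFBC and with the large fifo
 * enabled (DFS set in CTEST5), a DFIFO byte counter of 0x1c against a
 * DBC residual of 0x14 gives delta = (0x1c - 0x14) & 0x3ff = 8, i.e.
 * eight bytes were fetched into the DMA fifo but never made it to the
 * SCSI bus, so they are added back to 'rest' before the fifos are
 * cleared.
 */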
  2244. /*
  2245. * log the information
  2246. */
  2247. if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
  2248. printf ("P%x%x RL=%d D=%d ", cmd&7, INB(np, nc_sbcl)&7,
  2249. (unsigned) rest, (unsigned) delta);
  2250. /*
  2251. * try to find the interrupted script command,
  2252. * and the address at which to continue.
  2253. */
  2254. vdsp = NULL;
  2255. nxtdsp = 0;
  2256. if (dsp > np->scripta_ba &&
  2257. dsp <= np->scripta_ba + np->scripta_sz) {
  2258. vdsp = (u32 *)((char*)np->scripta0 + (dsp-np->scripta_ba-8));
  2259. nxtdsp = dsp;
  2260. }
  2261. else if (dsp > np->scriptb_ba &&
  2262. dsp <= np->scriptb_ba + np->scriptb_sz) {
  2263. vdsp = (u32 *)((char*)np->scriptb0 + (dsp-np->scriptb_ba-8));
  2264. nxtdsp = dsp;
  2265. }
  2266. /*
  2267. * log the information
  2268. */
  2269. if (DEBUG_FLAGS & DEBUG_PHASE) {
  2270. printf ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ",
  2271. cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd);
  2272. }
  2273. if (!vdsp) {
  2274. printf ("%s: interrupted SCRIPT address not found.\n",
  2275. sym_name (np));
  2276. goto reset_all;
  2277. }
  2278. if (!cp) {
  2279. printf ("%s: SCSI phase error fixup: CCB already dequeued.\n",
  2280. sym_name (np));
  2281. goto reset_all;
  2282. }
  2283. /*
  2284. * get old startaddress and old length.
  2285. */
  2286. oadr = scr_to_cpu(vdsp[1]);
  2287. if (cmd & 0x10) { /* Table indirect */
  2288. tblp = (u32 *) ((char*) &cp->phys + oadr);
  2289. olen = scr_to_cpu(tblp[0]);
  2290. oadr = scr_to_cpu(tblp[1]);
  2291. } else {
  2292. tblp = (u32 *) 0;
  2293. olen = scr_to_cpu(vdsp[0]) & 0xffffff;
  2294. }
  2295. if (DEBUG_FLAGS & DEBUG_PHASE) {
  2296. printf ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
  2297. (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
  2298. tblp,
  2299. (unsigned) olen,
  2300. (unsigned) oadr);
  2301. }
  2302. /*
  2303. * check cmd against assumed interrupted script command.
2304. * In DT data phase, the MOVE instruction does not carry bit 4
2305. * of the phase.
  2306. */
  2307. if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
  2308. sym_print_addr(cp->cmd,
  2309. "internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
  2310. cmd, scr_to_cpu(vdsp[0]) >> 24);
  2311. goto reset_all;
  2312. }
  2313. /*
  2314. * if old phase not dataphase, leave here.
  2315. */
  2316. if (cmd & 2) {
  2317. sym_print_addr(cp->cmd,
  2318. "phase change %x-%x %d@%08x resid=%d.\n",
  2319. cmd&7, INB(np, nc_sbcl)&7, (unsigned)olen,
  2320. (unsigned)oadr, (unsigned)rest);
  2321. goto unexpected_phase;
  2322. }
  2323. /*
  2324. * Choose the correct PM save area.
  2325. *
  2326. * Look at the PM_SAVE SCRIPT if you want to understand
  2327. * this stuff. The equivalent code is implemented in
  2328. * SCRIPTS for the 895A, 896 and 1010 that are able to
  2329. * handle PM from the SCRIPTS processor.
  2330. */
  2331. hflags0 = INB(np, HF_PRT);
  2332. hflags = hflags0;
  2333. if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
  2334. if (hflags & HF_IN_PM0)
  2335. nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
  2336. else if (hflags & HF_IN_PM1)
  2337. nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
  2338. if (hflags & HF_DP_SAVED)
  2339. hflags ^= HF_ACT_PM;
  2340. }
  2341. if (!(hflags & HF_ACT_PM)) {
  2342. pm = &cp->phys.pm0;
  2343. newcmd = SCRIPTA_BA(np, pm0_data);
  2344. }
  2345. else {
  2346. pm = &cp->phys.pm1;
  2347. newcmd = SCRIPTA_BA(np, pm1_data);
  2348. }
  2349. hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
  2350. if (hflags != hflags0)
  2351. OUTB(np, HF_PRT, hflags);
  2352. /*
  2353. * fillin the phase mismatch context
  2354. */
  2355. pm->sg.addr = cpu_to_scr(oadr + olen - rest);
  2356. pm->sg.size = cpu_to_scr(rest);
  2357. pm->ret = cpu_to_scr(nxtdsp);
  2358. /*
  2359. * If we have a SWIDE,
  2360. * - prepare the address to write the SWIDE from SCRIPTS,
  2361. * - compute the SCRIPTS address to restart from,
  2362. * - move current data pointer context by one byte.
  2363. */
  2364. nxtdsp = SCRIPTA_BA(np, dispatch);
  2365. if ((cmd & 7) == 1 && cp && (cp->phys.select.sel_scntl3 & EWS) &&
  2366. (INB(np, nc_scntl2) & WSR)) {
  2367. u32 tmp;
  2368. /*
  2369. * Set up the table indirect for the MOVE
  2370. * of the residual byte and adjust the data
  2371. * pointer context.
  2372. */
  2373. tmp = scr_to_cpu(pm->sg.addr);
  2374. cp->phys.wresid.addr = cpu_to_scr(tmp);
  2375. pm->sg.addr = cpu_to_scr(tmp + 1);
  2376. tmp = scr_to_cpu(pm->sg.size);
  2377. cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
  2378. pm->sg.size = cpu_to_scr(tmp - 1);
  2379. /*
  2380. * If only the residual byte is to be moved,
  2381. * no PM context is needed.
  2382. */
  2383. if ((tmp&0xffffff) == 1)
  2384. newcmd = pm->ret;
  2385. /*
  2386. * Prepare the address of SCRIPTS that will
  2387. * move the residual byte to memory.
  2388. */
  2389. nxtdsp = SCRIPTB_BA(np, wsr_ma_helper);
  2390. }
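/*
 * Concretely (illustrative values): if the phase mismatch context was
 * {addr = A, size = N} and a wide residue byte is latched (WSR set),
 * the code above turns it into a one byte table indirect move
 * wresid = {A, (N & 0xff000000) | 1} and shifts the context to
 * {A + 1, N - 1}; wsr_ma_helper then stores that single byte before
 * resuming. When the residual byte is the only thing left, no phase
 * mismatch context is needed and the script data pointer is simply set
 * to the saved return address (pm->ret).
 */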
  2391. if (DEBUG_FLAGS & DEBUG_PHASE) {
  2392. sym_print_addr(cp->cmd, "PM %x %x %x / %x %x %x.\n",
  2393. hflags0, hflags, newcmd,
  2394. (unsigned)scr_to_cpu(pm->sg.addr),
  2395. (unsigned)scr_to_cpu(pm->sg.size),
  2396. (unsigned)scr_to_cpu(pm->ret));
  2397. }
  2398. /*
  2399. * Restart the SCRIPTS processor.
  2400. */
  2401. sym_set_script_dp (np, cp, newcmd);
  2402. OUTL_DSP(np, nxtdsp);
  2403. return;
  2404. /*
2405. * Unexpected phase changes that occur when the current phase
2406. * is not a DATA IN or DATA OUT phase are due to error conditions.
2407. * Such an event may only happen when the SCRIPTS processor is
2408. * using a multibyte SCSI MOVE.
  2409. *
2410. * Phase change            Some possible cause
2411. *
2412. * COMMAND  --> MSG IN     SCSI parity error detected by target.
2413. * COMMAND  --> STATUS     Bad command or refused by target.
2414. * MSG OUT  --> MSG IN     Message rejected by target.
2415. * MSG OUT  --> COMMAND    Bogus target that discards extended
2416. *                         negotiation messages.
  2417. *
2418. * The code below does not care about the new phase and so
2419. * trusts the target. Why annoy it?
  2420. * If the interrupted phase is COMMAND phase, we restart at
  2421. * dispatcher.
  2422. * If a target does not get all the messages after selection,
  2423. * the code assumes blindly that the target discards extended
  2424. * messages and clears the negotiation status.
  2425. * If the target does not want all our response to negotiation,
  2426. * we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
  2427. * bloat for such a should_not_happen situation).
2428. * In all other situations, we reset the BUS.
2429. * Are these assumptions reasonable? (Wait and see ...)
  2430. */
  2431. unexpected_phase:
  2432. dsp -= 8;
  2433. nxtdsp = 0;
  2434. switch (cmd & 7) {
  2435. case 2: /* COMMAND phase */
  2436. nxtdsp = SCRIPTA_BA(np, dispatch);
  2437. break;
  2438. #if 0
  2439. case 3: /* STATUS phase */
  2440. nxtdsp = SCRIPTA_BA(np, dispatch);
  2441. break;
  2442. #endif
  2443. case 6: /* MSG OUT phase */
  2444. /*
  2445. * If the device may want to use untagged when we want
  2446. * tagged, we prepare an IDENTIFY without disc. granted,
  2447. * since we will not be able to handle reselect.
  2448. * Otherwise, we just don't care.
  2449. */
  2450. if (dsp == SCRIPTA_BA(np, send_ident)) {
  2451. if (cp->tag != NO_TAG && olen - rest <= 3) {
  2452. cp->host_status = HS_BUSY;
  2453. np->msgout[0] = IDENTIFY(0, cp->lun);
  2454. nxtdsp = SCRIPTB_BA(np, ident_break_atn);
  2455. }
  2456. else
  2457. nxtdsp = SCRIPTB_BA(np, ident_break);
  2458. }
  2459. else if (dsp == SCRIPTB_BA(np, send_wdtr) ||
  2460. dsp == SCRIPTB_BA(np, send_sdtr) ||
  2461. dsp == SCRIPTB_BA(np, send_ppr)) {
  2462. nxtdsp = SCRIPTB_BA(np, nego_bad_phase);
  2463. if (dsp == SCRIPTB_BA(np, send_ppr)) {
  2464. struct scsi_device *dev = cp->cmd->device;
  2465. dev->ppr = 0;
  2466. }
  2467. }
  2468. break;
  2469. #if 0
  2470. case 7: /* MSG IN phase */
  2471. nxtdsp = SCRIPTA_BA(np, clrack);
  2472. break;
  2473. #endif
  2474. }
  2475. if (nxtdsp) {
  2476. OUTL_DSP(np, nxtdsp);
  2477. return;
  2478. }
  2479. reset_all:
  2480. sym_start_reset(np);
  2481. }
  2482. /*
  2483. * chip interrupt handler
  2484. *
  2485. * In normal situations, interrupt conditions occur one at
  2486. * a time. But when something bad happens on the SCSI BUS,
  2487. * the chip may raise several interrupt flags before
2488. * stopping and interrupting the CPU. The additional
  2489. * interrupt flags are stacked in some extra registers
  2490. * after the SIP and/or DIP flag has been raised in the
  2491. * ISTAT. After the CPU has read the interrupt condition
  2492. * flag from SIST or DSTAT, the chip unstacks the other
  2493. * interrupt flags and sets the corresponding bits in
  2494. * SIST or DSTAT. Since the chip starts stacking once the
  2495. * SIP or DIP flag is set, there is a small window of time
  2496. * where the stacking does not occur.
  2497. *
  2498. * Typically, multiple interrupt conditions may happen in
  2499. * the following situations:
  2500. *
  2501. * - SCSI parity error + Phase mismatch (PAR|MA)
2502. * When a parity error is detected in an input phase
  2503. * and the device switches to msg-in phase inside a
  2504. * block MOV.
  2505. * - SCSI parity error + Unexpected disconnect (PAR|UDC)
  2506. * When a stupid device does not want to handle the
  2507. * recovery of an SCSI parity error.
  2508. * - Some combinations of STO, PAR, UDC, ...
2509. * When using non-compliant SCSI stuff, when the user is
2510. * doing non-compliant hot tampering on the BUS, when
2511. * something really bad happens to a device, etc ...
  2512. *
  2513. * The heuristic suggested by SYMBIOS to handle
  2514. * multiple interrupts is to try unstacking all
2515. * interrupt conditions and to handle them with some
  2516. * priority based on error severity.
  2517. * This will work when the unstacking has been
  2518. * successful, but we cannot be 100 % sure of that,
  2519. * since the CPU may have been faster to unstack than
  2520. * the chip is able to stack. Hmmm ... But it seems that
  2521. * such a situation is very unlikely to happen.
  2522. *
2523. * If this happens, for example an STO caught by the CPU
2524. * then a UDC happening before the CPU has restarted
2525. * the SCRIPTS, the driver may wrongly complete the
  2526. * same command on UDC, since the SCRIPTS didn't restart
  2527. * and the DSA still points to the same command.
  2528. * We avoid this situation by setting the DSA to an
  2529. * invalid value when the CCB is completed and before
  2530. * restarting the SCRIPTS.
  2531. *
  2532. * Another issue is that we need some section of our
  2533. * recovery procedures to be somehow uninterruptible but
2534. * the SCRIPTS processor does not provide such a
2535. * feature. For this reason, we handle recovery preferably
  2536. * from the C code and check against some SCRIPTS critical
  2537. * sections from the C code.
  2538. *
  2539. * Hopefully, the interrupt handling of the driver is now
2540. * able to resist weird BUS error conditions, but do not
2541. * ask me for any guarantee that it will never fail. :-)
2542. * Use at your own risk.
  2543. */
  2544. irqreturn_t sym_interrupt(struct Scsi_Host *shost)
  2545. {
  2546. struct sym_data *sym_data = shost_priv(shost);
  2547. struct sym_hcb *np = sym_data->ncb;
  2548. struct pci_dev *pdev = sym_data->pdev;
  2549. u_char istat, istatc;
  2550. u_char dstat;
  2551. u_short sist;
  2552. /*
  2553. * interrupt on the fly ?
  2554. * (SCRIPTS may still be running)
  2555. *
  2556. * A `dummy read' is needed to ensure that the
  2557. * clear of the INTF flag reaches the device
  2558. * and that posted writes are flushed to memory
  2559. * before the scanning of the DONE queue.
  2560. * Note that SCRIPTS also (dummy) read to memory
2561. * prior to delivering the INTF interrupt condition.
  2562. */
  2563. istat = INB(np, nc_istat);
  2564. if (istat & INTF) {
  2565. OUTB(np, nc_istat, (istat & SIGP) | INTF | np->istat_sem);
  2566. istat |= INB(np, nc_istat); /* DUMMY READ */
  2567. if (DEBUG_FLAGS & DEBUG_TINY) printf ("F ");
  2568. sym_wakeup_done(np);
  2569. }
  2570. if (!(istat & (SIP|DIP)))
  2571. return (istat & INTF) ? IRQ_HANDLED : IRQ_NONE;
  2572. #if 0 /* We should never get this one */
  2573. if (istat & CABRT)
  2574. OUTB(np, nc_istat, CABRT);
  2575. #endif
  2576. /*
  2577. * PAR and MA interrupts may occur at the same time,
  2578. * and we need to know of both in order to handle
  2579. * this situation properly. We try to unstack SCSI
  2580. * interrupts for that reason. BTW, I dislike a LOT
  2581. * such a loop inside the interrupt routine.
  2582. * Even if DMA interrupt stacking is very unlikely to
  2583. * happen, we also try unstacking these ones, since
  2584. * this has no performance impact.
  2585. */
  2586. sist = 0;
  2587. dstat = 0;
  2588. istatc = istat;
  2589. do {
  2590. if (istatc & SIP)
  2591. sist |= INW(np, nc_sist);
  2592. if (istatc & DIP)
  2593. dstat |= INB(np, nc_dstat);
  2594. istatc = INB(np, nc_istat);
  2595. istat |= istatc;
  2596. /* Prevent deadlock waiting on a condition that may
  2597. * never clear. */
  2598. if (unlikely(sist == 0xffff && dstat == 0xff)) {
  2599. if (pci_channel_offline(pdev))
  2600. return IRQ_NONE;
  2601. }
  2602. } while (istatc & (SIP|DIP));
  2603. if (DEBUG_FLAGS & DEBUG_TINY)
  2604. printf ("<%d|%x:%x|%x:%x>",
  2605. (int)INB(np, nc_scr0),
  2606. dstat,sist,
  2607. (unsigned)INL(np, nc_dsp),
  2608. (unsigned)INL(np, nc_dbc));
  2609. /*
  2610. * On paper, a memory read barrier may be needed here to
  2611. * prevent out of order LOADs by the CPU from having
  2612. * prefetched stale data prior to DMA having occurred.
  2613. * And since we are paranoid ... :)
  2614. */
  2615. MEMORY_READ_BARRIER();
  2616. /*
  2617. * First, interrupts we want to service cleanly.
  2618. *
  2619. * Phase mismatch (MA) is the most frequent interrupt
2620. * for chips earlier than the 896 and so we have to service
  2621. * it as quickly as possible.
  2622. * A SCSI parity error (PAR) may be combined with a phase
  2623. * mismatch condition (MA).
  2624. * Programmed interrupts (SIR) are used to call the C code
  2625. * from SCRIPTS.
  2626. * The single step interrupt (SSI) is not used in this
  2627. * driver.
  2628. */
  2629. if (!(sist & (STO|GEN|HTH|SGE|UDC|SBMC|RST)) &&
  2630. !(dstat & (MDPE|BF|ABRT|IID))) {
  2631. if (sist & PAR) sym_int_par (np, sist);
  2632. else if (sist & MA) sym_int_ma (np);
  2633. else if (dstat & SIR) sym_int_sir(np);
  2634. else if (dstat & SSI) OUTONB_STD();
  2635. else goto unknown_int;
  2636. return IRQ_HANDLED;
  2637. }
  2638. /*
 * Now, interrupts that do not happen in normal
  2640. * situations and that we may need to recover from.
  2641. *
  2642. * On SCSI RESET (RST), we reset everything.
  2643. * On SCSI BUS MODE CHANGE (SBMC), we complete all
  2644. * active CCBs with RESET status, prepare all devices
  2645. * for negotiating again and restart the SCRIPTS.
 * On STO and UDC, we complete the CCB with the
 * corresponding status and restart the SCRIPTS.
  2648. */
  2649. if (sist & RST) {
  2650. printf("%s: SCSI BUS reset detected.\n", sym_name(np));
  2651. sym_start_up(shost, 1);
  2652. return IRQ_HANDLED;
  2653. }
  2654. OUTB(np, nc_ctest3, np->rv_ctest3 | CLF); /* clear dma fifo */
  2655. OUTB(np, nc_stest3, TE|CSF); /* clear scsi fifo */
  2656. if (!(sist & (GEN|HTH|SGE)) &&
  2657. !(dstat & (MDPE|BF|ABRT|IID))) {
  2658. if (sist & SBMC) sym_int_sbmc(shost);
  2659. else if (sist & STO) sym_int_sto (np);
  2660. else if (sist & UDC) sym_int_udc (np);
  2661. else goto unknown_int;
  2662. return IRQ_HANDLED;
  2663. }
  2664. /*
  2665. * Now, interrupts we are not able to recover cleanly.
  2666. *
  2667. * Log message for hard errors.
  2668. * Reset everything.
  2669. */
  2670. sym_log_hard_error(shost, sist, dstat);
  2671. if ((sist & (GEN|HTH|SGE)) ||
  2672. (dstat & (MDPE|BF|ABRT|IID))) {
  2673. sym_start_reset(np);
  2674. return IRQ_HANDLED;
  2675. }
  2676. unknown_int:
  2677. /*
 * We could not determine the cause of the interrupt. :(
 * Print a message. The timeout will do the real work.
  2680. */
  2681. printf( "%s: unknown interrupt(s) ignored, "
  2682. "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n",
  2683. sym_name(np), istat, dstat, sist);
  2684. return IRQ_NONE;
  2685. }
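/*
 * For context, a minimal sketch of how a handler like sym_interrupt()
 * above is typically hooked into the kernel's shared-IRQ machinery.
 * The actual glue lives outside this file; the function name below is
 * an illustrative assumption, not driver code, hence the #if 0 guard.
 */
#if 0
static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(shost->host_lock, flags);
	ret = sym_interrupt(shost);	/* the handler above */
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}
#endif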
  2686. /*
  2687. * Dequeue from the START queue all CCBs that match
  2688. * a given target/lun/task condition (-1 means all),
  2689. * and move them from the BUSY queue to the COMP queue
  2690. * with DID_SOFT_ERROR status condition.
  2691. * This function is used during error handling/recovery.
  2692. * It is called with SCRIPTS not running.
  2693. */
  2694. static int
  2695. sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task)
  2696. {
  2697. int j;
  2698. struct sym_ccb *cp;
  2699. /*
  2700. * Make sure the starting index is within range.
  2701. */
  2702. assert((i >= 0) && (i < 2*MAX_QUEUE));
  2703. /*
  2704. * Walk until end of START queue and dequeue every job
  2705. * that matches the target/lun/task condition.
  2706. */
  2707. j = i;
  2708. while (i != np->squeueput) {
  2709. cp = sym_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
  2710. assert(cp);
  2711. #ifdef SYM_CONF_IARB_SUPPORT
/* Forget hints for IARB, they may no longer be relevant */
  2713. cp->host_flags &= ~HF_HINT_IARB;
  2714. #endif
  2715. if ((target == -1 || cp->target == target) &&
  2716. (lun == -1 || cp->lun == lun) &&
  2717. (task == -1 || cp->tag == task)) {
  2718. sym_set_cam_status(cp->cmd, DID_SOFT_ERROR);
  2719. sym_remque(&cp->link_ccbq);
  2720. sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
  2721. }
  2722. else {
  2723. if (i != j)
  2724. np->squeue[j] = np->squeue[i];
  2725. if ((j += 2) >= MAX_QUEUE*2) j = 0;
  2726. }
  2727. if ((i += 2) >= MAX_QUEUE*2) i = 0;
  2728. }
  2729. if (i != j) /* Copy back the idle task if needed */
  2730. np->squeue[j] = np->squeue[i];
  2731. np->squeueput = j; /* Update our current start queue pointer */
  2732. return (i - j) / 2;
  2733. }
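/*
 * The two-pointer walk above can be hard to follow because the start
 * queue is a circular array in which each job occupies two slots and
 * the put slot holds the idle task. Below is a minimal, self-contained
 * sketch of the same compaction idea on a plain circular buffer of
 * integers (stride 1, no idle-task copy-back). It is illustrative only
 * and the names are assumptions of this note, hence the #if 0 guard.
 */
#if 0
static int compact_circular(int *q, int qlen, int get, int put,
                            int (*matches)(int val))
{
	int i = get, j = get, removed = 0;

	while (i != put) {
		if (matches(q[i])) {
			removed++;		/* drop this entry */
		} else {
			if (i != j)
				q[j] = q[i];	/* slide survivor down */
			if (++j >= qlen)
				j = 0;
		}
		if (++i >= qlen)
			i = 0;
	}
	/* 'j' is the new put index; 'removed' entries were dequeued. */
	return removed;
}
#endif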
  2734. /*
  2735. * chip handler for bad SCSI status condition
  2736. *
  2737. * In case of bad SCSI status, we unqueue all the tasks
  2738. * currently queued to the controller but not yet started
  2739. * and then restart the SCRIPTS processor immediately.
  2740. *
  2741. * QUEUE FULL and BUSY conditions are handled the same way.
  2742. * Basically all the not yet started tasks are requeued in
  2743. * device queue and the queue is frozen until a completion.
  2744. *
  2745. * For CHECK CONDITION and COMMAND TERMINATED status, we use
  2746. * the CCB of the failed command to prepare a REQUEST SENSE
  2747. * SCSI command and queue it to the controller queue.
  2748. *
  2749. * SCRATCHA is assumed to have been loaded with STARTPOS
  2750. * before the SCRIPTS called the C code.
  2751. */
  2752. static void sym_sir_bad_scsi_status(struct sym_hcb *np, int num, struct sym_ccb *cp)
  2753. {
  2754. u32 startp;
  2755. u_char s_status = cp->ssss_status;
  2756. u_char h_flags = cp->host_flags;
  2757. int msglen;
  2758. int i;
  2759. /*
  2760. * Compute the index of the next job to start from SCRIPTS.
  2761. */
  2762. i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
  2763. /*
 * The last CCB queued, used as the IARB hint, may
 * no longer be relevant. Forget it.
  2766. */
  2767. #ifdef SYM_CONF_IARB_SUPPORT
  2768. if (np->last_cp)
  2769. np->last_cp = 0;
  2770. #endif
  2771. /*
  2772. * Now deal with the SCSI status.
  2773. */
  2774. switch(s_status) {
  2775. case S_BUSY:
  2776. case S_QUEUE_FULL:
  2777. if (sym_verbose >= 2) {
  2778. sym_print_addr(cp->cmd, "%s\n",
s_status == S_BUSY ? "BUSY" : "QUEUE FULL");
  2780. }
  2781. default: /* S_INT, S_INT_COND_MET, S_CONFLICT */
  2782. sym_complete_error (np, cp);
  2783. break;
  2784. case S_TERMINATED:
  2785. case S_CHECK_COND:
  2786. /*
 * If we get a SCSI error when requesting sense, give up.
  2788. */
  2789. if (h_flags & HF_SENSE) {
  2790. sym_complete_error (np, cp);
  2791. break;
  2792. }
  2793. /*
  2794. * Dequeue all queued CCBs for that device not yet started,
  2795. * and restart the SCRIPTS processor immediately.
  2796. */
  2797. sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
  2798. OUTL_DSP(np, SCRIPTA_BA(np, start));
  2799. /*
  2800. * Save some info of the actual IO.
  2801. * Compute the data residual.
  2802. */
  2803. cp->sv_scsi_status = cp->ssss_status;
  2804. cp->sv_xerr_status = cp->xerr_status;
  2805. cp->sv_resid = sym_compute_residual(np, cp);
  2806. /*
  2807. * Prepare all needed data structures for
  2808. * requesting sense data.
  2809. */
  2810. cp->scsi_smsg2[0] = IDENTIFY(0, cp->lun);
  2811. msglen = 1;
  2812. /*
  2813. * If we are currently using anything different from
  2814. * async. 8 bit data transfers with that target,
  2815. * start a negotiation, since the device may want
 * to report a UNIT ATTENTION condition to us due to
 * a cause we currently ignore, and we do not want
 * to be stuck with WIDE and/or SYNC data transfers.
  2819. *
  2820. * cp->nego_status is filled by sym_prepare_nego().
  2821. */
  2822. cp->nego_status = 0;
  2823. msglen += sym_prepare_nego(np, cp, &cp->scsi_smsg2[msglen]);
  2824. /*
  2825. * Message table indirect structure.
  2826. */
  2827. cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg2);
  2828. cp->phys.smsg.size = cpu_to_scr(msglen);
  2829. /*
  2830. * sense command
  2831. */
  2832. cp->phys.cmd.addr = CCB_BA(cp, sensecmd);
  2833. cp->phys.cmd.size = cpu_to_scr(6);
  2834. /*
  2835. * patch requested size into sense command
  2836. */
  2837. cp->sensecmd[0] = REQUEST_SENSE;
  2838. cp->sensecmd[1] = 0;
  2839. if (cp->cmd->device->scsi_level <= SCSI_2 && cp->lun <= 7)
  2840. cp->sensecmd[1] = cp->lun << 5;
  2841. cp->sensecmd[4] = SYM_SNS_BBUF_LEN;
  2842. cp->data_len = SYM_SNS_BBUF_LEN;
  2843. /*
  2844. * sense data
  2845. */
  2846. memset(cp->sns_bbuf, 0, SYM_SNS_BBUF_LEN);
  2847. cp->phys.sense.addr = CCB_BA(cp, sns_bbuf);
  2848. cp->phys.sense.size = cpu_to_scr(SYM_SNS_BBUF_LEN);
  2849. /*
  2850. * requeue the command.
  2851. */
  2852. startp = SCRIPTB_BA(np, sdata_in);
  2853. cp->phys.head.savep = cpu_to_scr(startp);
  2854. cp->phys.head.lastp = cpu_to_scr(startp);
  2855. cp->startp = cpu_to_scr(startp);
  2856. cp->goalp = cpu_to_scr(startp + 16);
  2857. cp->host_xflags = 0;
  2858. cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
  2859. cp->ssss_status = S_ILLEGAL;
  2860. cp->host_flags = (HF_SENSE|HF_DATA_IN);
  2861. cp->xerr_status = 0;
  2862. cp->extra_bytes = 0;
  2863. cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
  2864. /*
  2865. * Requeue the command.
  2866. */
  2867. sym_put_start_queue(np, cp);
  2868. /*
  2869. * Give back to upper layer everything we have dequeued.
  2870. */
  2871. sym_flush_comp_queue(np, 0);
  2872. break;
  2873. }
  2874. }
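/*
 * For reference, the auto-sense path above builds a classic 6-byte
 * REQUEST SENSE CDB into cp->sensecmd[]. A minimal sketch of that
 * layout on a local buffer; the helper name and parameters are
 * assumptions of this note, not driver code, hence the #if 0 guard.
 */
#if 0
static void build_request_sense_cdb(unsigned char cdb[6], int scsi2_or_older,
                                    int lun, int alloc_len)
{
	cdb[0] = 0x03;			/* REQUEST SENSE opcode */
	cdb[1] = (scsi2_or_older && lun <= 7) ? (lun << 5) : 0;
	cdb[2] = 0;
	cdb[3] = 0;
	cdb[4] = alloc_len;		/* sense allocation length, e.g. SYM_SNS_BBUF_LEN */
	cdb[5] = 0;			/* control byte */
}
#endif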
  2875. /*
 * After a device has accepted some management message
 * such as BUS DEVICE RESET, ABORT TASK, etc., or when
 * a device signals a UNIT ATTENTION condition, some
 * tasks are thrown away by the device. We are required
 * to reflect that in our task list since the device
 * will never complete these tasks.
 *
 * This function moves from the BUSY queue to the COMP
 * queue all disconnected CCBs for a given target that
 * match the following criteria:
 * - lun=-1 means any logical UNIT, otherwise a given one.
 * - task=-1 means any task, otherwise a given one.
 */
  2889. int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int task)
  2890. {
  2891. SYM_QUEHEAD qtmp, *qp;
  2892. int i = 0;
  2893. struct sym_ccb *cp;
  2894. /*
  2895. * Move the entire BUSY queue to our temporary queue.
  2896. */
  2897. sym_que_init(&qtmp);
  2898. sym_que_splice(&np->busy_ccbq, &qtmp);
  2899. sym_que_init(&np->busy_ccbq);
  2900. /*
  2901. * Put all CCBs that matches our criteria into
  2902. * the COMP queue and put back other ones into
  2903. * the BUSY queue.
  2904. */
  2905. while ((qp = sym_remque_head(&qtmp)) != NULL) {
  2906. struct scsi_cmnd *cmd;
  2907. cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
  2908. cmd = cp->cmd;
  2909. if (cp->host_status != HS_DISCONNECT ||
  2910. cp->target != target ||
  2911. (lun != -1 && cp->lun != lun) ||
  2912. (task != -1 &&
  2913. (cp->tag != NO_TAG && cp->scsi_smsg[2] != task))) {
  2914. sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
  2915. continue;
  2916. }
  2917. sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
  2918. /* Preserve the software timeout condition */
  2919. if (sym_get_cam_status(cmd) != DID_TIME_OUT)
  2920. sym_set_cam_status(cmd, cam_status);
  2921. ++i;
  2922. #if 0
  2923. printf("XXXX TASK @%p CLEARED\n", cp);
  2924. #endif
  2925. }
  2926. return i;
  2927. }
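/*
 * Usage reminder for the wildcard arguments of sym_clear_tasks() above,
 * sketched as it would typically be invoked from recovery code. The
 * target/lun values are illustrative only, hence the #if 0 guard.
 */
#if 0
	/* Complete every disconnected task of target 4, any LUN, any tag. */
	sym_clear_tasks(np, DID_ABORT, 4, -1, -1);

	/* Complete only the disconnected tasks of target 4, LUN 2. */
	sym_clear_tasks(np, DID_ABORT, 4, 2, -1);
#endif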
  2928. /*
  2929. * chip handler for TASKS recovery
  2930. *
 * We cannot safely abort a command while the SCRIPTS
 * processor is running, since we would just be racing
 * with it.
  2934. *
  2935. * As long as we have tasks to abort, we keep the SEM
  2936. * bit set in the ISTAT. When this bit is set, the
  2937. * SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED)
  2938. * each time it enters the scheduler.
  2939. *
 * If we have to reset a target, clear the tasks of a unit,
 * or abort a disconnected job, we restart the
 * SCRIPTS to select the target. Once
 * selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED).
 * If it loses arbitration, the SCRIPTS will interrupt again
 * the next time it enters its scheduler, and so on ...
  2946. *
 * On SIR_TARGET_SELECTED, we scan for the most
 * appropriate thing to do:
  2949. *
 * - If nothing, we just send an M_ABORT message to the
 * target to get rid of the useless SCSI bus ownership.
 * According to the specs, no tasks shall be affected.
 * - If the target is to be reset, we send it an M_RESET
 * message.
 * - If a logical UNIT is to be cleared, we send the
  2956. * IDENTIFY(lun) + M_ABORT.
  2957. * - If an untagged task is to be aborted, we send the
  2958. * IDENTIFY(lun) + M_ABORT.
  2959. * - If a tagged task is to be aborted, we send the
  2960. * IDENTIFY(lun) + task attributes + M_ABORT_TAG.
  2961. *
  2962. * Once our 'kiss of death' :) message has been accepted
  2963. * by the target, the SCRIPTS interrupts again
  2964. * (SIR_ABORT_SENT). On this interrupt, we complete
  2965. * all the CCBs that should have been aborted by the
  2966. * target according to our message.
  2967. */
  2968. static void sym_sir_task_recovery(struct sym_hcb *np, int num)
  2969. {
  2970. SYM_QUEHEAD *qp;
  2971. struct sym_ccb *cp;
  2972. struct sym_tcb *tp = NULL; /* gcc isn't quite smart enough yet */
  2973. struct scsi_target *starget;
  2974. int target=-1, lun=-1, task;
  2975. int i, k;
  2976. switch(num) {
  2977. /*
  2978. * The SCRIPTS processor stopped before starting
  2979. * the next command in order to allow us to perform
  2980. * some task recovery.
  2981. */
  2982. case SIR_SCRIPT_STOPPED:
  2983. /*
  2984. * Do we have any target to reset or unit to clear ?
  2985. */
  2986. for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
  2987. tp = &np->target[i];
  2988. if (tp->to_reset ||
  2989. (tp->lun0p && tp->lun0p->to_clear)) {
  2990. target = i;
  2991. break;
  2992. }
  2993. if (!tp->lunmp)
  2994. continue;
  2995. for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
  2996. if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
  2997. target = i;
  2998. break;
  2999. }
  3000. }
  3001. if (target != -1)
  3002. break;
  3003. }
  3004. /*
  3005. * If not, walk the busy queue for any
  3006. * disconnected CCB to be aborted.
  3007. */
  3008. if (target == -1) {
  3009. FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
  3010. cp = sym_que_entry(qp,struct sym_ccb,link_ccbq);
  3011. if (cp->host_status != HS_DISCONNECT)
  3012. continue;
  3013. if (cp->to_abort) {
  3014. target = cp->target;
  3015. break;
  3016. }
  3017. }
  3018. }
  3019. /*
  3020. * If some target is to be selected,
  3021. * prepare and start the selection.
  3022. */
  3023. if (target != -1) {
  3024. tp = &np->target[target];
  3025. np->abrt_sel.sel_id = target;
  3026. np->abrt_sel.sel_scntl3 = tp->head.wval;
  3027. np->abrt_sel.sel_sxfer = tp->head.sval;
  3028. OUTL(np, nc_dsa, np->hcb_ba);
  3029. OUTL_DSP(np, SCRIPTB_BA(np, sel_for_abort));
  3030. return;
  3031. }
  3032. /*
 * Now look for a CCB to abort that hasn't started yet.
 * Btw, the SCRIPTS processor is still stopped, so
 * we are not racing with it.
  3036. */
  3037. i = 0;
  3038. cp = NULL;
  3039. FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
  3040. cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
  3041. if (cp->host_status != HS_BUSY &&
  3042. cp->host_status != HS_NEGOTIATE)
  3043. continue;
  3044. if (!cp->to_abort)
  3045. continue;
  3046. #ifdef SYM_CONF_IARB_SUPPORT
  3047. /*
 * If we are using IMMEDIATE ARBITRATION, we do not
  3049. * want to cancel the last queued CCB, since the
  3050. * SCRIPTS may have anticipated the selection.
  3051. */
  3052. if (cp == np->last_cp) {
  3053. cp->to_abort = 0;
  3054. continue;
  3055. }
  3056. #endif
  3057. i = 1; /* Means we have found some */
  3058. break;
  3059. }
  3060. if (!i) {
  3061. /*
 * We are done, so we do not need
 * to synchronize with the SCRIPTS any longer.
  3064. * Remove the SEM flag from the ISTAT.
  3065. */
  3066. np->istat_sem = 0;
  3067. OUTB(np, nc_istat, SIGP);
  3068. break;
  3069. }
  3070. /*
  3071. * Compute index of next position in the start
  3072. * queue the SCRIPTS intends to start and dequeue
  3073. * all CCBs for that device that haven't been started.
  3074. */
  3075. i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
  3076. i = sym_dequeue_from_squeue(np, i, cp->target, cp->lun, -1);
  3077. /*
  3078. * Make sure at least our IO to abort has been dequeued.
  3079. */
  3080. #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
  3081. assert(i && sym_get_cam_status(cp->cmd) == DID_SOFT_ERROR);
  3082. #else
  3083. sym_remque(&cp->link_ccbq);
  3084. sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);
  3085. #endif
  3086. /*
  3087. * Keep track in cam status of the reason of the abort.
  3088. */
  3089. if (cp->to_abort == 2)
  3090. sym_set_cam_status(cp->cmd, DID_TIME_OUT);
  3091. else
  3092. sym_set_cam_status(cp->cmd, DID_ABORT);
  3093. /*
  3094. * Complete with error everything that we have dequeued.
  3095. */
  3096. sym_flush_comp_queue(np, 0);
  3097. break;
  3098. /*
  3099. * The SCRIPTS processor has selected a target
 * for which we may have some manual recovery to perform.
  3101. */
  3102. case SIR_TARGET_SELECTED:
  3103. target = INB(np, nc_sdid) & 0xf;
  3104. tp = &np->target[target];
  3105. np->abrt_tbl.addr = cpu_to_scr(vtobus(np->abrt_msg));
  3106. /*
 * If the target is to be reset, prepare an
 * M_RESET message and clear the to_reset flag,
 * since we do not expect this operation to fail.
  3110. */
  3111. if (tp->to_reset) {
  3112. np->abrt_msg[0] = M_RESET;
  3113. np->abrt_tbl.size = 1;
  3114. tp->to_reset = 0;
  3115. break;
  3116. }
  3117. /*
  3118. * Otherwise, look for some logical unit to be cleared.
  3119. */
  3120. if (tp->lun0p && tp->lun0p->to_clear)
  3121. lun = 0;
  3122. else if (tp->lunmp) {
  3123. for (k = 1 ; k < SYM_CONF_MAX_LUN ; k++) {
  3124. if (tp->lunmp[k] && tp->lunmp[k]->to_clear) {
  3125. lun = k;
  3126. break;
  3127. }
  3128. }
  3129. }
  3130. /*
  3131. * If a logical unit is to be cleared, prepare
  3132. * an IDENTIFY(lun) + ABORT MESSAGE.
  3133. */
  3134. if (lun != -1) {
  3135. struct sym_lcb *lp = sym_lp(tp, lun);
  3136. lp->to_clear = 0; /* We don't expect to fail here */
  3137. np->abrt_msg[0] = IDENTIFY(0, lun);
  3138. np->abrt_msg[1] = M_ABORT;
  3139. np->abrt_tbl.size = 2;
  3140. break;
  3141. }
  3142. /*
  3143. * Otherwise, look for some disconnected job to
  3144. * abort for this target.
  3145. */
  3146. i = 0;
  3147. cp = NULL;
  3148. FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
  3149. cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
  3150. if (cp->host_status != HS_DISCONNECT)
  3151. continue;
  3152. if (cp->target != target)
  3153. continue;
  3154. if (!cp->to_abort)
  3155. continue;
  3156. i = 1; /* Means we have some */
  3157. break;
  3158. }
  3159. /*
 * If we have none, probably because the device has
 * completed the command before we won arbitration,
  3162. * send a M_ABORT message without IDENTIFY.
  3163. * According to the specs, the device must just
  3164. * disconnect the BUS and not abort any task.
  3165. */
  3166. if (!i) {
  3167. np->abrt_msg[0] = M_ABORT;
  3168. np->abrt_tbl.size = 1;
  3169. break;
  3170. }
  3171. /*
  3172. * We have some task to abort.
  3173. * Set the IDENTIFY(lun)
  3174. */
  3175. np->abrt_msg[0] = IDENTIFY(0, cp->lun);
  3176. /*
  3177. * If we want to abort an untagged command, we
 * will send an IDENTIFY + M_ABORT.
 * Otherwise (tagged command), we will send
 * an IDENTIFY + task attributes + ABORT TAG.
  3181. */
  3182. if (cp->tag == NO_TAG) {
  3183. np->abrt_msg[1] = M_ABORT;
  3184. np->abrt_tbl.size = 2;
  3185. } else {
  3186. np->abrt_msg[1] = cp->scsi_smsg[1];
  3187. np->abrt_msg[2] = cp->scsi_smsg[2];
  3188. np->abrt_msg[3] = M_ABORT_TAG;
  3189. np->abrt_tbl.size = 4;
  3190. }
  3191. /*
  3192. * Keep track of software timeout condition, since the
  3193. * peripheral driver may not count retries on abort
  3194. * conditions not due to timeout.
  3195. */
  3196. if (cp->to_abort == 2)
  3197. sym_set_cam_status(cp->cmd, DID_TIME_OUT);
cp->to_abort = 0; /* We do not expect to fail here */
  3199. break;
  3200. /*
  3201. * The target has accepted our message and switched
  3202. * to BUS FREE phase as we expected.
  3203. */
  3204. case SIR_ABORT_SENT:
  3205. target = INB(np, nc_sdid) & 0xf;
  3206. tp = &np->target[target];
  3207. starget = tp->starget;
  3208. /*
 * If we didn't abort anything, leave here.
  3210. */
  3211. if (np->abrt_msg[0] == M_ABORT)
  3212. break;
  3213. /*
  3214. * If we sent a M_RESET, then a hardware reset has
  3215. * been performed by the target.
  3216. * - Reset everything to async 8 bit
 * - Tell ourselves to negotiate next time :-)
  3218. * - Prepare to clear all disconnected CCBs for
  3219. * this target from our task list (lun=task=-1)
  3220. */
  3221. lun = -1;
  3222. task = -1;
  3223. if (np->abrt_msg[0] == M_RESET) {
  3224. tp->head.sval = 0;
  3225. tp->head.wval = np->rv_scntl3;
  3226. tp->head.uval = 0;
  3227. spi_period(starget) = 0;
  3228. spi_offset(starget) = 0;
  3229. spi_width(starget) = 0;
  3230. spi_iu(starget) = 0;
  3231. spi_dt(starget) = 0;
  3232. spi_qas(starget) = 0;
  3233. tp->tgoal.check_nego = 1;
  3234. tp->tgoal.renego = 0;
  3235. }
  3236. /*
  3237. * Otherwise, check for the LUN and TASK(s)
 * concerned by the cancellation.
  3239. * If it is not ABORT_TAG then it is CLEAR_QUEUE
  3240. * or an ABORT message :-)
  3241. */
  3242. else {
  3243. lun = np->abrt_msg[0] & 0x3f;
  3244. if (np->abrt_msg[1] == M_ABORT_TAG)
  3245. task = np->abrt_msg[2];
  3246. }
  3247. /*
  3248. * Complete all the CCBs the device should have
  3249. * aborted due to our 'kiss of death' message.
  3250. */
  3251. i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
  3252. sym_dequeue_from_squeue(np, i, target, lun, -1);
  3253. sym_clear_tasks(np, DID_ABORT, target, lun, task);
  3254. sym_flush_comp_queue(np, 0);
  3255. /*
  3256. * If we sent a BDR, make upper layer aware of that.
  3257. */
  3258. if (np->abrt_msg[0] == M_RESET)
  3259. starget_printk(KERN_NOTICE, starget,
  3260. "has been reset\n");
  3261. break;
  3262. }
  3263. /*
  3264. * Print to the log the message we intend to send.
  3265. */
  3266. if (num == SIR_TARGET_SELECTED) {
  3267. dev_info(&tp->starget->dev, "control msgout:");
  3268. sym_printl_hex(np->abrt_msg, np->abrt_tbl.size);
  3269. np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
  3270. }
  3271. /*
  3272. * Let the SCRIPTS processor continue.
  3273. */
  3274. OUTONB_STD();
  3275. }
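/*
 * The message sequences built above into np->abrt_msg[] fall into a
 * few fixed shapes. A condensed sketch of those shapes; the helper
 * name and its parameters are assumptions of this note, not driver
 * code, hence the #if 0 guard.
 */
#if 0
static int build_abort_msg(unsigned char msg[4], int reset_target,
                           int lun, int tag_msg, int tag)
{
	if (reset_target) {
		msg[0] = M_RESET;		/* target reset */
		return 1;
	}
	msg[0] = IDENTIFY(0, lun);
	if (tag < 0) {
		msg[1] = M_ABORT;		/* untagged nexus or LUN clear */
		return 2;
	}
	msg[1] = tag_msg;			/* simple/ordered queue tag type */
	msg[2] = tag;				/* tag value */
	msg[3] = M_ABORT_TAG;			/* tagged nexus */
	return 4;
}
#endif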
  3276. /*
 * Gerard's alchemy:) that deals with the data
  3278. * pointer for both MDP and the residual calculation.
  3279. *
  3280. * I didn't want to bloat the code by more than 200
  3281. * lines for the handling of both MDP and the residual.
  3282. * This has been achieved by using a data pointer
 * representation consisting of an index in the data
  3284. * array (dp_sg) and a negative offset (dp_ofs) that
  3285. * have the following meaning:
  3286. *
  3287. * - dp_sg = SYM_CONF_MAX_SG
  3288. * we are at the end of the data script.
  3289. * - dp_sg < SYM_CONF_MAX_SG
  3290. * dp_sg points to the next entry of the scatter array
  3291. * we want to transfer.
  3292. * - dp_ofs < 0
 * dp_ofs represents the residual bytes of the
 * previous scatter entry that we will send first.
  3295. * - dp_ofs = 0
  3296. * no residual to send first.
  3297. *
 * The function sym_evaluate_dp() accepts an arbitrary
  3299. * offset (basically from the MDP message) and returns
  3300. * the corresponding values of dp_sg and dp_ofs.
  3301. */
  3302. static int sym_evaluate_dp(struct sym_hcb *np, struct sym_ccb *cp, u32 scr, int *ofs)
  3303. {
  3304. u32 dp_scr;
  3305. int dp_ofs, dp_sg, dp_sgmin;
  3306. int tmp;
  3307. struct sym_pmc *pm;
  3308. /*
 * Compute the resulting data pointer in terms of a script
  3310. * address within some DATA script and a signed byte offset.
  3311. */
  3312. dp_scr = scr;
  3313. dp_ofs = *ofs;
  3314. if (dp_scr == SCRIPTA_BA(np, pm0_data))
  3315. pm = &cp->phys.pm0;
  3316. else if (dp_scr == SCRIPTA_BA(np, pm1_data))
  3317. pm = &cp->phys.pm1;
  3318. else
  3319. pm = NULL;
  3320. if (pm) {
  3321. dp_scr = scr_to_cpu(pm->ret);
  3322. dp_ofs -= scr_to_cpu(pm->sg.size) & 0x00ffffff;
  3323. }
  3324. /*
  3325. * If we are auto-sensing, then we are done.
  3326. */
  3327. if (cp->host_flags & HF_SENSE) {
  3328. *ofs = dp_ofs;
  3329. return 0;
  3330. }
  3331. /*
  3332. * Deduce the index of the sg entry.
  3333. * Keep track of the index of the first valid entry.
  3334. * If result is dp_sg = SYM_CONF_MAX_SG, then we are at the
  3335. * end of the data.
  3336. */
  3337. tmp = scr_to_cpu(cp->goalp);
  3338. dp_sg = SYM_CONF_MAX_SG;
  3339. if (dp_scr != tmp)
  3340. dp_sg -= (tmp - 8 - (int)dp_scr) / (2*4);
  3341. dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
  3342. /*
  3343. * Move to the sg entry the data pointer belongs to.
  3344. *
  3345. * If we are inside the data area, we expect result to be:
  3346. *
  3347. * Either,
  3348. * dp_ofs = 0 and dp_sg is the index of the sg entry
  3349. * the data pointer belongs to (or the end of the data)
  3350. * Or,
  3351. * dp_ofs < 0 and dp_sg is the index of the sg entry
  3352. * the data pointer belongs to + 1.
  3353. */
  3354. if (dp_ofs < 0) {
  3355. int n;
  3356. while (dp_sg > dp_sgmin) {
  3357. --dp_sg;
  3358. tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
  3359. n = dp_ofs + (tmp & 0xffffff);
  3360. if (n > 0) {
  3361. ++dp_sg;
  3362. break;
  3363. }
  3364. dp_ofs = n;
  3365. }
  3366. }
  3367. else if (dp_ofs > 0) {
  3368. while (dp_sg < SYM_CONF_MAX_SG) {
  3369. tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
  3370. dp_ofs -= (tmp & 0xffffff);
  3371. ++dp_sg;
  3372. if (dp_ofs <= 0)
  3373. break;
  3374. }
  3375. }
  3376. /*
  3377. * Make sure the data pointer is inside the data area.
  3378. * If not, return some error.
  3379. */
  3380. if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0))
  3381. goto out_err;
  3382. else if (dp_sg > SYM_CONF_MAX_SG ||
  3383. (dp_sg == SYM_CONF_MAX_SG && dp_ofs > 0))
  3384. goto out_err;
  3385. /*
  3386. * Save the extreme pointer if needed.
  3387. */
  3388. if (dp_sg > cp->ext_sg ||
  3389. (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) {
  3390. cp->ext_sg = dp_sg;
  3391. cp->ext_ofs = dp_ofs;
  3392. }
  3393. /*
  3394. * Return data.
  3395. */
  3396. *ofs = dp_ofs;
  3397. return dp_sg;
  3398. out_err:
  3399. return -1;
  3400. }
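/*
 * Worked example of the (dp_sg, dp_ofs) representation used above,
 * assuming SYM_CONF_MAX_SG = 96 and a command with 3 segments of
 * 4096, 2048 and 1024 bytes stored at indices 93, 94 and 95:
 *
 *   bytes already transferred = 4608 (all of entry 93 + 512 of 94)
 *   => the pointer lies inside entry 94 with 1536 bytes left,
 *   => dp_sg = 95 (next entry) and dp_ofs = -1536.
 *
 * The sketch below recomputes that pair from a transferred-byte count.
 * It is illustrative only (names and layout are assumptions of this
 * note), hence the #if 0 guard.
 */
#if 0
static void bytes_to_dp(const unsigned *size, int sgmin, int sgmax,
                        unsigned done, int *dp_sg, int *dp_ofs)
{
	int sg = sgmin;

	while (sg < sgmax && done >= size[sg])
		done -= size[sg++];
	if (done == 0) {
		*dp_sg = sg;			/* start of the next entry */
		*dp_ofs = 0;
	} else {
		*dp_sg = sg + 1;		/* entry after the current one */
		*dp_ofs = (int)done - (int)size[sg];	/* negative remainder */
	}
}
#endif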
  3401. /*
  3402. * chip handler for MODIFY DATA POINTER MESSAGE
  3403. *
  3404. * We also call this function on IGNORE WIDE RESIDUE
  3405. * messages that do not match a SWIDE full condition.
  3406. * Btw, we assume in that situation that such a message
  3407. * is equivalent to a MODIFY DATA POINTER (offset=-1).
  3408. */
  3409. static void sym_modify_dp(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp, int ofs)
  3410. {
  3411. int dp_ofs = ofs;
  3412. u32 dp_scr = sym_get_script_dp (np, cp);
  3413. u32 dp_ret;
  3414. u32 tmp;
  3415. u_char hflags;
  3416. int dp_sg;
  3417. struct sym_pmc *pm;
  3418. /*
  3419. * Not supported for auto-sense.
  3420. */
  3421. if (cp->host_flags & HF_SENSE)
  3422. goto out_reject;
  3423. /*
 * Apply our alchemy:) (see comments in sym_evaluate_dp())
 * to the resulting data pointer.
  3426. */
  3427. dp_sg = sym_evaluate_dp(np, cp, dp_scr, &dp_ofs);
  3428. if (dp_sg < 0)
  3429. goto out_reject;
  3430. /*
 * And our alchemy:) allows us to easily calculate the data
  3432. * script address we want to return for the next data phase.
  3433. */
  3434. dp_ret = cpu_to_scr(cp->goalp);
  3435. dp_ret = dp_ret - 8 - (SYM_CONF_MAX_SG - dp_sg) * (2*4);
  3436. /*
 * If offset / scatter entry is zero we do not need
  3438. * a context for the new current data pointer.
  3439. */
  3440. if (dp_ofs == 0) {
  3441. dp_scr = dp_ret;
  3442. goto out_ok;
  3443. }
  3444. /*
  3445. * Get a context for the new current data pointer.
  3446. */
  3447. hflags = INB(np, HF_PRT);
  3448. if (hflags & HF_DP_SAVED)
  3449. hflags ^= HF_ACT_PM;
  3450. if (!(hflags & HF_ACT_PM)) {
  3451. pm = &cp->phys.pm0;
  3452. dp_scr = SCRIPTA_BA(np, pm0_data);
  3453. }
  3454. else {
  3455. pm = &cp->phys.pm1;
  3456. dp_scr = SCRIPTA_BA(np, pm1_data);
  3457. }
  3458. hflags &= ~(HF_DP_SAVED);
  3459. OUTB(np, HF_PRT, hflags);
  3460. /*
  3461. * Set up the new current data pointer.
  3462. * ofs < 0 there, and for the next data phase, we
  3463. * want to transfer part of the data of the sg entry
  3464. * corresponding to index dp_sg-1 prior to returning
  3465. * to the main data script.
  3466. */
  3467. pm->ret = cpu_to_scr(dp_ret);
  3468. tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
  3469. tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
  3470. pm->sg.addr = cpu_to_scr(tmp);
  3471. pm->sg.size = cpu_to_scr(-dp_ofs);
  3472. out_ok:
  3473. sym_set_script_dp (np, cp, dp_scr);
  3474. OUTL_DSP(np, SCRIPTA_BA(np, clrack));
  3475. return;
  3476. out_reject:
  3477. OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
  3478. }
  3479. /*
  3480. * chip calculation of the data residual.
  3481. *
  3482. * As I used to say, the requirement of data residual
  3483. * in SCSI is broken, useless and cannot be achieved
  3484. * without huge complexity.
  3485. * But most OSes and even the official CAM require it.
 * When stupidity happens to be so widespread inside
 * a community, it gets hard to convince anyone otherwise.
 *
 * Anyway, I don't care, since I am not going to use
 * any software that considers this data residual as
 * relevant information. :)
  3492. */
  3493. int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
  3494. {
  3495. int dp_sg, dp_sgmin, resid = 0;
  3496. int dp_ofs = 0;
  3497. /*
  3498. * Check for some data lost or just thrown away.
  3499. * We are not required to be quite accurate in this
  3500. * situation. Btw, if we are odd for output and the
 * device claims some more data, it may well happen
 * that our residual is zero. :-)
  3503. */
  3504. if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
  3505. if (cp->xerr_status & XE_EXTRA_DATA)
  3506. resid -= cp->extra_bytes;
  3507. if (cp->xerr_status & XE_SODL_UNRUN)
  3508. ++resid;
  3509. if (cp->xerr_status & XE_SWIDE_OVRUN)
  3510. --resid;
  3511. }
  3512. /*
  3513. * If all data has been transferred,
  3514. * there is no residual.
  3515. */
  3516. if (cp->phys.head.lastp == cp->goalp)
  3517. return resid;
  3518. /*
  3519. * If no data transfer occurs, or if the data
  3520. * pointer is weird, return full residual.
  3521. */
  3522. if (cp->startp == cp->phys.head.lastp ||
  3523. sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
  3524. &dp_ofs) < 0) {
  3525. return cp->data_len - cp->odd_byte_adjustment;
  3526. }
  3527. /*
  3528. * If we were auto-sensing, then we are done.
  3529. */
  3530. if (cp->host_flags & HF_SENSE) {
  3531. return -dp_ofs;
  3532. }
  3533. /*
 * We are now fully comfortable with the computation
  3535. * of the data residual (2's complement).
  3536. */
  3537. dp_sgmin = SYM_CONF_MAX_SG - cp->segments;
  3538. resid = -cp->ext_ofs;
  3539. for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) {
  3540. u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
  3541. resid += (tmp & 0xffffff);
  3542. }
  3543. resid -= cp->odd_byte_adjustment;
  3544. /*
  3545. * Hopefully, the result is not too wrong.
  3546. */
  3547. return resid;
  3548. }
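/*
 * Worked example for the residual computation above, continuing the
 * (dp_sg, dp_ofs) example given after sym_evaluate_dp(): with
 * segments of 4096/2048/1024 bytes at indices 93/94/95, ext_sg = 95
 * and ext_ofs = -1536, the residual is
 *
 *   resid = -ext_ofs + size[95] = 1536 + 1024 = 2560 bytes,
 *
 * which indeed equals data_len (7168) minus the 4608 bytes already
 * transferred, before any odd-byte adjustment is subtracted.
 */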
  3549. /*
  3550. * Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER.
  3551. *
  3552. * When we try to negotiate, we append the negotiation message
  3553. * to the identify and (maybe) simple tag message.
  3554. * The host status field is set to HS_NEGOTIATE to mark this
  3555. * situation.
  3556. *
  3557. * If the target doesn't answer this message immediately
  3558. * (as required by the standard), the SIR_NEGO_FAILED interrupt
  3559. * will be raised eventually.
  3560. * The handler removes the HS_NEGOTIATE status, and sets the
  3561. * negotiated value to the default (async / nowide).
  3562. *
  3563. * If we receive a matching answer immediately, we check it
  3564. * for validity, and set the values.
  3565. *
  3566. * If we receive a Reject message immediately, we assume the
  3567. * negotiation has failed, and fall back to standard values.
  3568. *
  3569. * If we receive a negotiation message while not in HS_NEGOTIATE
  3570. * state, it's a target initiated negotiation. We prepare a
  3571. * (hopefully) valid answer, set our parameters, and send back
  3572. * this answer to the target.
  3573. *
  3574. * If the target doesn't fetch the answer (no message out phase),
  3575. * we assume the negotiation has failed, and fall back to default
  3576. * settings (SIR_NEGO_PROTO interrupt).
  3577. *
  3578. * When we set the values, we adjust them in all ccbs belonging
  3579. * to this target, in the controller's register, and in the "phys"
  3580. * field of the controller's struct sym_hcb.
  3581. */
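/*
 * For reference while reading the handlers below: SDTR, WDTR and PPR
 * are SCSI extended messages, and the np->msgin[] byte offsets used by
 * the checks follow directly from their layout. A minimal sketch of an
 * SDTR message built by hand; the helper name is an assumption of this
 * note, not driver code, hence the #if 0 guard.
 */
#if 0
static int build_sdtr(unsigned char *msg, unsigned char period,
                      unsigned char offset)
{
	msg[0] = M_EXTENDED;		/* 0x01: extended message */
	msg[1] = 3;			/* remaining length */
	msg[2] = M_X_SYNC_REQ;		/* SDTR code */
	msg[3] = period;		/* transfer period factor */
	msg[4] = offset;		/* REQ/ACK offset, 0 = async */
	return 5;			/* total message length */
}
#endif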
  3582. /*
  3583. * chip handler for SYNCHRONOUS DATA TRANSFER REQUEST (SDTR) message.
  3584. */
  3585. static int
  3586. sym_sync_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
  3587. {
  3588. int target = cp->target;
  3589. u_char chg, ofs, per, fak, div;
  3590. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3591. sym_print_nego_msg(np, target, "sync msgin", np->msgin);
  3592. }
  3593. /*
  3594. * Get requested values.
  3595. */
  3596. chg = 0;
  3597. per = np->msgin[3];
  3598. ofs = np->msgin[4];
  3599. /*
  3600. * Check values against our limits.
  3601. */
  3602. if (ofs) {
  3603. if (ofs > np->maxoffs)
  3604. {chg = 1; ofs = np->maxoffs;}
  3605. }
  3606. if (ofs) {
  3607. if (per < np->minsync)
  3608. {chg = 1; per = np->minsync;}
  3609. }
  3610. /*
  3611. * Get new chip synchronous parameters value.
  3612. */
  3613. div = fak = 0;
  3614. if (ofs && sym_getsync(np, 0, per, &div, &fak) < 0)
  3615. goto reject_it;
  3616. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3617. sym_print_addr(cp->cmd,
  3618. "sdtr: ofs=%d per=%d div=%d fak=%d chg=%d.\n",
  3619. ofs, per, div, fak, chg);
  3620. }
  3621. /*
  3622. * If it was an answer we want to change,
  3623. * then it isn't acceptable. Reject it.
  3624. */
  3625. if (!req && chg)
  3626. goto reject_it;
  3627. /*
  3628. * Apply new values.
  3629. */
  3630. sym_setsync (np, target, ofs, per, div, fak);
  3631. /*
  3632. * It was an answer. We are done.
  3633. */
  3634. if (!req)
  3635. return 0;
  3636. /*
  3637. * It was a request. Prepare an answer message.
  3638. */
  3639. spi_populate_sync_msg(np->msgout, per, ofs);
  3640. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3641. sym_print_nego_msg(np, target, "sync msgout", np->msgout);
  3642. }
  3643. np->msgin [0] = M_NOOP;
  3644. return 0;
  3645. reject_it:
  3646. sym_setsync (np, target, 0, 0, 0, 0);
  3647. return -1;
  3648. }
  3649. static void sym_sync_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
  3650. {
  3651. int req = 1;
  3652. int result;
  3653. /*
  3654. * Request or answer ?
  3655. */
  3656. if (INB(np, HS_PRT) == HS_NEGOTIATE) {
  3657. OUTB(np, HS_PRT, HS_BUSY);
  3658. if (cp->nego_status && cp->nego_status != NS_SYNC)
  3659. goto reject_it;
  3660. req = 0;
  3661. }
  3662. /*
  3663. * Check and apply new values.
  3664. */
  3665. result = sym_sync_nego_check(np, req, cp);
  3666. if (result) /* Not acceptable, reject it */
  3667. goto reject_it;
  3668. if (req) { /* Was a request, send response. */
  3669. cp->nego_status = NS_SYNC;
  3670. OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
  3671. }
  3672. else /* Was a response, we are done. */
  3673. OUTL_DSP(np, SCRIPTA_BA(np, clrack));
  3674. return;
  3675. reject_it:
  3676. OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
  3677. }
  3678. /*
  3679. * chip handler for PARALLEL PROTOCOL REQUEST (PPR) message.
  3680. */
  3681. static int
  3682. sym_ppr_nego_check(struct sym_hcb *np, int req, int target)
  3683. {
  3684. struct sym_tcb *tp = &np->target[target];
  3685. unsigned char fak, div;
  3686. int dt, chg = 0;
  3687. unsigned char per = np->msgin[3];
  3688. unsigned char ofs = np->msgin[5];
  3689. unsigned char wide = np->msgin[6];
  3690. unsigned char opts = np->msgin[7] & PPR_OPT_MASK;
  3691. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3692. sym_print_nego_msg(np, target, "ppr msgin", np->msgin);
  3693. }
  3694. /*
  3695. * Check values against our limits.
  3696. */
  3697. if (wide > np->maxwide) {
  3698. chg = 1;
  3699. wide = np->maxwide;
  3700. }
  3701. if (!wide || !(np->features & FE_U3EN))
  3702. opts = 0;
  3703. if (opts != (np->msgin[7] & PPR_OPT_MASK))
  3704. chg = 1;
  3705. dt = opts & PPR_OPT_DT;
  3706. if (ofs) {
  3707. unsigned char maxoffs = dt ? np->maxoffs_dt : np->maxoffs;
  3708. if (ofs > maxoffs) {
  3709. chg = 1;
  3710. ofs = maxoffs;
  3711. }
  3712. }
  3713. if (ofs) {
  3714. unsigned char minsync = dt ? np->minsync_dt : np->minsync;
  3715. if (per < minsync) {
  3716. chg = 1;
  3717. per = minsync;
  3718. }
  3719. }
  3720. /*
  3721. * Get new chip synchronous parameters value.
  3722. */
  3723. div = fak = 0;
  3724. if (ofs && sym_getsync(np, dt, per, &div, &fak) < 0)
  3725. goto reject_it;
  3726. /*
  3727. * If it was an answer we want to change,
  3728. * then it isn't acceptable. Reject it.
  3729. */
  3730. if (!req && chg)
  3731. goto reject_it;
  3732. /*
  3733. * Apply new values.
  3734. */
  3735. sym_setpprot(np, target, opts, ofs, per, wide, div, fak);
  3736. /*
  3737. * It was an answer. We are done.
  3738. */
  3739. if (!req)
  3740. return 0;
  3741. /*
  3742. * It was a request. Prepare an answer message.
  3743. */
  3744. spi_populate_ppr_msg(np->msgout, per, ofs, wide, opts);
  3745. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3746. sym_print_nego_msg(np, target, "ppr msgout", np->msgout);
  3747. }
  3748. np->msgin [0] = M_NOOP;
  3749. return 0;
  3750. reject_it:
  3751. sym_setpprot (np, target, 0, 0, 0, 0, 0, 0);
  3752. /*
  3753. * If it is a device response that should result in
  3754. * ST, we may want to try a legacy negotiation later.
  3755. */
  3756. if (!req && !opts) {
  3757. tp->tgoal.period = per;
  3758. tp->tgoal.offset = ofs;
  3759. tp->tgoal.width = wide;
  3760. tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
  3761. tp->tgoal.check_nego = 1;
  3762. }
  3763. return -1;
  3764. }
  3765. static void sym_ppr_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
  3766. {
  3767. int req = 1;
  3768. int result;
  3769. /*
  3770. * Request or answer ?
  3771. */
  3772. if (INB(np, HS_PRT) == HS_NEGOTIATE) {
  3773. OUTB(np, HS_PRT, HS_BUSY);
  3774. if (cp->nego_status && cp->nego_status != NS_PPR)
  3775. goto reject_it;
  3776. req = 0;
  3777. }
  3778. /*
  3779. * Check and apply new values.
  3780. */
  3781. result = sym_ppr_nego_check(np, req, cp->target);
  3782. if (result) /* Not acceptable, reject it */
  3783. goto reject_it;
  3784. if (req) { /* Was a request, send response. */
  3785. cp->nego_status = NS_PPR;
  3786. OUTL_DSP(np, SCRIPTB_BA(np, ppr_resp));
  3787. }
  3788. else /* Was a response, we are done. */
  3789. OUTL_DSP(np, SCRIPTA_BA(np, clrack));
  3790. return;
  3791. reject_it:
  3792. OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
  3793. }
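/*
 * Same idea for the PPR handler above: the np->msgin[] offsets it reads
 * (3 = period, 5 = offset, 6 = width, 7 = protocol options) come from
 * the PARALLEL PROTOCOL REQUEST extended message layout. A minimal
 * sketch; the helper name is an assumption of this note, hence #if 0.
 */
#if 0
static int build_ppr(unsigned char *msg, unsigned char period,
                     unsigned char offset, unsigned char width,
                     unsigned char options)
{
	msg[0] = M_EXTENDED;		/* 0x01: extended message */
	msg[1] = 6;			/* remaining length */
	msg[2] = M_X_PPR_REQ;		/* PPR code */
	msg[3] = period;		/* transfer period factor */
	msg[4] = 0;			/* reserved */
	msg[5] = offset;		/* REQ/ACK offset */
	msg[6] = width;			/* width exponent (1 = 16 bit) */
	msg[7] = options;		/* DT, IU, QAS ... bits */
	return 8;			/* total message length */
}
#endif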
  3794. /*
  3795. * chip handler for WIDE DATA TRANSFER REQUEST (WDTR) message.
  3796. */
  3797. static int
  3798. sym_wide_nego_check(struct sym_hcb *np, int req, struct sym_ccb *cp)
  3799. {
  3800. int target = cp->target;
  3801. u_char chg, wide;
  3802. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3803. sym_print_nego_msg(np, target, "wide msgin", np->msgin);
  3804. }
  3805. /*
  3806. * Get requested values.
  3807. */
  3808. chg = 0;
  3809. wide = np->msgin[3];
  3810. /*
  3811. * Check values against our limits.
  3812. */
  3813. if (wide > np->maxwide) {
  3814. chg = 1;
  3815. wide = np->maxwide;
  3816. }
  3817. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3818. sym_print_addr(cp->cmd, "wdtr: wide=%d chg=%d.\n",
  3819. wide, chg);
  3820. }
  3821. /*
  3822. * If it was an answer we want to change,
  3823. * then it isn't acceptable. Reject it.
  3824. */
  3825. if (!req && chg)
  3826. goto reject_it;
  3827. /*
  3828. * Apply new values.
  3829. */
  3830. sym_setwide (np, target, wide);
  3831. /*
  3832. * It was an answer. We are done.
  3833. */
  3834. if (!req)
  3835. return 0;
  3836. /*
  3837. * It was a request. Prepare an answer message.
  3838. */
  3839. spi_populate_width_msg(np->msgout, wide);
  3840. np->msgin [0] = M_NOOP;
  3841. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3842. sym_print_nego_msg(np, target, "wide msgout", np->msgout);
  3843. }
  3844. return 0;
  3845. reject_it:
  3846. return -1;
  3847. }
  3848. static void sym_wide_nego(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
  3849. {
  3850. int req = 1;
  3851. int result;
  3852. /*
  3853. * Request or answer ?
  3854. */
  3855. if (INB(np, HS_PRT) == HS_NEGOTIATE) {
  3856. OUTB(np, HS_PRT, HS_BUSY);
  3857. if (cp->nego_status && cp->nego_status != NS_WIDE)
  3858. goto reject_it;
  3859. req = 0;
  3860. }
  3861. /*
  3862. * Check and apply new values.
  3863. */
  3864. result = sym_wide_nego_check(np, req, cp);
  3865. if (result) /* Not acceptable, reject it */
  3866. goto reject_it;
  3867. if (req) { /* Was a request, send response. */
  3868. cp->nego_status = NS_WIDE;
  3869. OUTL_DSP(np, SCRIPTB_BA(np, wdtr_resp));
  3870. } else { /* Was a response. */
  3871. /*
  3872. * Negotiate for SYNC immediately after WIDE response.
  3873. * This allows to negotiate for both WIDE and SYNC on
  3874. * a single SCSI command (Suggested by Justin Gibbs).
  3875. */
  3876. if (tp->tgoal.offset) {
  3877. spi_populate_sync_msg(np->msgout, tp->tgoal.period,
  3878. tp->tgoal.offset);
  3879. if (DEBUG_FLAGS & DEBUG_NEGO) {
  3880. sym_print_nego_msg(np, cp->target,
  3881. "sync msgout", np->msgout);
  3882. }
  3883. cp->nego_status = NS_SYNC;
  3884. OUTB(np, HS_PRT, HS_NEGOTIATE);
  3885. OUTL_DSP(np, SCRIPTB_BA(np, sdtr_resp));
  3886. return;
  3887. } else
  3888. OUTL_DSP(np, SCRIPTA_BA(np, clrack));
  3889. }
  3890. return;
  3891. reject_it:
  3892. OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
  3893. }
  3894. /*
  3895. * Reset DT, SYNC or WIDE to default settings.
  3896. *
  3897. * Called when a negotiation does not succeed either
  3898. * on rejection or on protocol error.
  3899. *
  3900. * A target that understands a PPR message should never
  3901. * reject it, and messing with it is very unlikely.
 * So, if a PPR causes problems, we may just want to
  3903. * try a legacy negotiation later.
  3904. */
  3905. static void sym_nego_default(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
  3906. {
  3907. switch (cp->nego_status) {
  3908. case NS_PPR:
  3909. #if 0
  3910. sym_setpprot (np, cp->target, 0, 0, 0, 0, 0, 0);
  3911. #else
  3912. if (tp->tgoal.period < np->minsync)
  3913. tp->tgoal.period = np->minsync;
  3914. if (tp->tgoal.offset > np->maxoffs)
  3915. tp->tgoal.offset = np->maxoffs;
  3916. tp->tgoal.iu = tp->tgoal.dt = tp->tgoal.qas = 0;
  3917. tp->tgoal.check_nego = 1;
  3918. #endif
  3919. break;
  3920. case NS_SYNC:
  3921. sym_setsync (np, cp->target, 0, 0, 0, 0);
  3922. break;
  3923. case NS_WIDE:
  3924. sym_setwide (np, cp->target, 0);
  3925. break;
  3926. }
  3927. np->msgin [0] = M_NOOP;
  3928. np->msgout[0] = M_NOOP;
  3929. cp->nego_status = 0;
  3930. }
  3931. /*
  3932. * chip handler for MESSAGE REJECT received in response to
  3933. * PPR, WIDE or SYNCHRONOUS negotiation.
  3934. */
  3935. static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym_ccb *cp)
  3936. {
  3937. sym_nego_default(np, tp, cp);
  3938. OUTB(np, HS_PRT, HS_BUSY);
  3939. }
  3940. /*
  3941. * chip exception handler for programmed interrupts.
  3942. */
  3943. static void sym_int_sir(struct sym_hcb *np)
  3944. {
  3945. u_char num = INB(np, nc_dsps);
  3946. u32 dsa = INL(np, nc_dsa);
  3947. struct sym_ccb *cp = sym_ccb_from_dsa(np, dsa);
  3948. u_char target = INB(np, nc_sdid) & 0x0f;
  3949. struct sym_tcb *tp = &np->target[target];
  3950. int tmp;
  3951. if (DEBUG_FLAGS & DEBUG_TINY) printf ("I#%d", num);
  3952. switch (num) {
  3953. #if SYM_CONF_DMA_ADDRESSING_MODE == 2
  3954. /*
  3955. * SCRIPTS tell us that we may have to update
  3956. * 64 bit DMA segment registers.
  3957. */
  3958. case SIR_DMAP_DIRTY:
  3959. sym_update_dmap_regs(np);
  3960. goto out;
  3961. #endif
  3962. /*
  3963. * Command has been completed with error condition
  3964. * or has been auto-sensed.
  3965. */
  3966. case SIR_COMPLETE_ERROR:
  3967. sym_complete_error(np, cp);
  3968. return;
  3969. /*
  3970. * The C code is currently trying to recover from something.
 * Typically, the user wants to abort some command.
  3972. */
  3973. case SIR_SCRIPT_STOPPED:
  3974. case SIR_TARGET_SELECTED:
  3975. case SIR_ABORT_SENT:
  3976. sym_sir_task_recovery(np, num);
  3977. return;
  3978. /*
  3979. * The device didn't go to MSG OUT phase after having
  3980. * been selected with ATN. We do not want to handle that.
  3981. */
  3982. case SIR_SEL_ATN_NO_MSG_OUT:
  3983. scmd_printk(KERN_WARNING, cp->cmd,
  3984. "No MSG OUT phase after selection with ATN\n");
  3985. goto out_stuck;
  3986. /*
  3987. * The device didn't switch to MSG IN phase after
  3988. * having reselected the initiator.
  3989. */
  3990. case SIR_RESEL_NO_MSG_IN:
  3991. scmd_printk(KERN_WARNING, cp->cmd,
  3992. "No MSG IN phase after reselection\n");
  3993. goto out_stuck;
  3994. /*
  3995. * After reselection, the device sent a message that wasn't
  3996. * an IDENTIFY.
  3997. */
  3998. case SIR_RESEL_NO_IDENTIFY:
  3999. scmd_printk(KERN_WARNING, cp->cmd,
  4000. "No IDENTIFY after reselection\n");
  4001. goto out_stuck;
  4002. /*
  4003. * The device reselected a LUN we do not know about.
  4004. */
  4005. case SIR_RESEL_BAD_LUN:
  4006. np->msgout[0] = M_RESET;
  4007. goto out;
  4008. /*
  4009. * The device reselected for an untagged nexus and we
  4010. * haven't any.
  4011. */
  4012. case SIR_RESEL_BAD_I_T_L:
  4013. np->msgout[0] = M_ABORT;
  4014. goto out;
  4015. /*
  4016. * The device reselected for a tagged nexus that we do not have.
  4017. */
  4018. case SIR_RESEL_BAD_I_T_L_Q:
  4019. np->msgout[0] = M_ABORT_TAG;
  4020. goto out;
  4021. /*
  4022. * The SCRIPTS let us know that the device has grabbed
  4023. * our message and will abort the job.
  4024. */
  4025. case SIR_RESEL_ABORTED:
  4026. np->lastmsg = np->msgout[0];
  4027. np->msgout[0] = M_NOOP;
  4028. scmd_printk(KERN_WARNING, cp->cmd,
  4029. "message %x sent on bad reselection\n", np->lastmsg);
  4030. goto out;
  4031. /*
  4032. * The SCRIPTS let us know that a message has been
  4033. * successfully sent to the device.
  4034. */
  4035. case SIR_MSG_OUT_DONE:
  4036. np->lastmsg = np->msgout[0];
  4037. np->msgout[0] = M_NOOP;
/* Should we really care about that? */
  4039. if (np->lastmsg == M_PARITY || np->lastmsg == M_ID_ERROR) {
  4040. if (cp) {
  4041. cp->xerr_status &= ~XE_PARITY_ERR;
  4042. if (!cp->xerr_status)
  4043. OUTOFFB(np, HF_PRT, HF_EXT_ERR);
  4044. }
  4045. }
  4046. goto out;
  4047. /*
  4048. * The device didn't send a GOOD SCSI status.
  4049. * We may have some work to do prior to allow
  4050. * the SCRIPTS processor to continue.
  4051. */
  4052. case SIR_BAD_SCSI_STATUS:
  4053. if (!cp)
  4054. goto out;
  4055. sym_sir_bad_scsi_status(np, num, cp);
  4056. return;
  4057. /*
  4058. * We are asked by the SCRIPTS to prepare a
  4059. * REJECT message.
  4060. */
  4061. case SIR_REJECT_TO_SEND:
  4062. sym_print_msg(cp, "M_REJECT to send for ", np->msgin);
  4063. np->msgout[0] = M_REJECT;
  4064. goto out;
  4065. /*
  4066. * We have been ODD at the end of a DATA IN
 * transfer and the device didn't send an
 * IGNORE WIDE RESIDUE message.
  4069. * It is a data overrun condition.
  4070. */
  4071. case SIR_SWIDE_OVERRUN:
  4072. if (cp) {
  4073. OUTONB(np, HF_PRT, HF_EXT_ERR);
  4074. cp->xerr_status |= XE_SWIDE_OVRUN;
  4075. }
  4076. goto out;
  4077. /*
  4078. * We have been ODD at the end of a DATA OUT
  4079. * transfer.
  4080. * It is a data underrun condition.
  4081. */
  4082. case SIR_SODL_UNDERRUN:
  4083. if (cp) {
  4084. OUTONB(np, HF_PRT, HF_EXT_ERR);
  4085. cp->xerr_status |= XE_SODL_UNRUN;
  4086. }
  4087. goto out;
  4088. /*
 * The device wants us to transfer more data than
  4090. * expected or in the wrong direction.
  4091. * The number of extra bytes is in scratcha.
  4092. * It is a data overrun condition.
  4093. */
  4094. case SIR_DATA_OVERRUN:
  4095. if (cp) {
  4096. OUTONB(np, HF_PRT, HF_EXT_ERR);
  4097. cp->xerr_status |= XE_EXTRA_DATA;
  4098. cp->extra_bytes += INL(np, nc_scratcha);
  4099. }
  4100. goto out;
  4101. /*
  4102. * The device switched to an illegal phase (4/5).
  4103. */
  4104. case SIR_BAD_PHASE:
  4105. if (cp) {
  4106. OUTONB(np, HF_PRT, HF_EXT_ERR);
  4107. cp->xerr_status |= XE_BAD_PHASE;
  4108. }
  4109. goto out;
  4110. /*
  4111. * We received a message.
  4112. */
  4113. case SIR_MSG_RECEIVED:
  4114. if (!cp)
  4115. goto out_stuck;
  4116. switch (np->msgin [0]) {
  4117. /*
  4118. * We received an extended message.
  4119. * We handle MODIFY DATA POINTER, SDTR, WDTR
  4120. * and reject all other extended messages.
  4121. */
  4122. case M_EXTENDED:
  4123. switch (np->msgin [2]) {
  4124. case M_X_MODIFY_DP:
  4125. if (DEBUG_FLAGS & DEBUG_POINTER)
  4126. sym_print_msg(cp, "extended msg ",
  4127. np->msgin);
  4128. tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) +
  4129. (np->msgin[5]<<8) + (np->msgin[6]);
  4130. sym_modify_dp(np, tp, cp, tmp);
  4131. return;
  4132. case M_X_SYNC_REQ:
  4133. sym_sync_nego(np, tp, cp);
  4134. return;
  4135. case M_X_PPR_REQ:
  4136. sym_ppr_nego(np, tp, cp);
  4137. return;
  4138. case M_X_WIDE_REQ:
  4139. sym_wide_nego(np, tp, cp);
  4140. return;
  4141. default:
  4142. goto out_reject;
  4143. }
  4144. break;
  4145. /*
 * We received a one- or two-byte message not handled by SCRIPTS.
  4147. * We are only expecting MESSAGE REJECT and IGNORE WIDE
  4148. * RESIDUE messages that haven't been anticipated by
  4149. * SCRIPTS on SWIDE full condition. Unanticipated IGNORE
  4150. * WIDE RESIDUE messages are aliased as MODIFY DP (-1).
  4151. */
  4152. case M_IGN_RESIDUE:
  4153. if (DEBUG_FLAGS & DEBUG_POINTER)
  4154. sym_print_msg(cp, "1 or 2 byte ", np->msgin);
  4155. if (cp->host_flags & HF_SENSE)
  4156. OUTL_DSP(np, SCRIPTA_BA(np, clrack));
  4157. else
  4158. sym_modify_dp(np, tp, cp, -1);
  4159. return;
  4160. case M_REJECT:
  4161. if (INB(np, HS_PRT) == HS_NEGOTIATE)
  4162. sym_nego_rejected(np, tp, cp);
  4163. else {
  4164. sym_print_addr(cp->cmd,
  4165. "M_REJECT received (%x:%x).\n",
  4166. scr_to_cpu(np->lastmsg), np->msgout[0]);
  4167. }
  4168. goto out_clrack;
  4169. break;
  4170. default:
  4171. goto out_reject;
  4172. }
  4173. break;
  4174. /*
  4175. * We received an unknown message.
  4176. * Ignore all MSG IN phases and reject it.
  4177. */
  4178. case SIR_MSG_WEIRD:
  4179. sym_print_msg(cp, "WEIRD message received", np->msgin);
  4180. OUTL_DSP(np, SCRIPTB_BA(np, msg_weird));
  4181. return;
  4182. /*
  4183. * Negotiation failed.
  4184. * Target does not send us the reply.
  4185. * Remove the HS_NEGOTIATE status.
  4186. */
  4187. case SIR_NEGO_FAILED:
  4188. OUTB(np, HS_PRT, HS_BUSY);
  4189. /*
  4190. * Negotiation failed.
  4191. * Target does not want answer message.
  4192. */
  4193. case SIR_NEGO_PROTO:
  4194. sym_nego_default(np, tp, cp);
  4195. goto out;
  4196. }
  4197. out:
  4198. OUTONB_STD();
  4199. return;
  4200. out_reject:
  4201. OUTL_DSP(np, SCRIPTB_BA(np, msg_bad));
  4202. return;
  4203. out_clrack:
  4204. OUTL_DSP(np, SCRIPTA_BA(np, clrack));
  4205. return;
  4206. out_stuck:
  4207. return;
  4208. }
  4209. /*
  4210. * Acquire a control block
  4211. */
  4212. struct sym_ccb *sym_get_ccb (struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order)
  4213. {
  4214. u_char tn = cmd->device->id;
  4215. u_char ln = cmd->device->lun;
  4216. struct sym_tcb *tp = &np->target[tn];
  4217. struct sym_lcb *lp = sym_lp(tp, ln);
  4218. u_short tag = NO_TAG;
  4219. SYM_QUEHEAD *qp;
  4220. struct sym_ccb *cp = NULL;
  4221. /*
  4222. * Look for a free CCB
  4223. */
  4224. if (sym_que_empty(&np->free_ccbq))
  4225. sym_alloc_ccb(np);
  4226. qp = sym_remque_head(&np->free_ccbq);
  4227. if (!qp)
  4228. goto out;
  4229. cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
  4230. {
  4231. /*
  4232. * If we have been asked for a tagged command.
  4233. */
  4234. if (tag_order) {
  4235. /*
  4236. * Debugging purpose.
  4237. */
  4238. #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
  4239. if (lp->busy_itl != 0)
  4240. goto out_free;
  4241. #endif
  4242. /*
  4243. * Allocate resources for tags if not yet.
  4244. */
  4245. if (!lp->cb_tags) {
  4246. sym_alloc_lcb_tags(np, tn, ln);
  4247. if (!lp->cb_tags)
  4248. goto out_free;
  4249. }
  4250. /*
  4251. * Get a tag for this SCSI IO and set up
  4252. * the CCB bus address for reselection,
  4253. * and count it for this LUN.
  4254. * Toggle reselect path to tagged.
  4255. */
  4256. if (lp->busy_itlq < SYM_CONF_MAX_TASK) {
  4257. tag = lp->cb_tags[lp->ia_tag];
  4258. if (++lp->ia_tag == SYM_CONF_MAX_TASK)
  4259. lp->ia_tag = 0;
  4260. ++lp->busy_itlq;
  4261. #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
  4262. lp->itlq_tbl[tag] = cpu_to_scr(cp->ccb_ba);
  4263. lp->head.resel_sa =
  4264. cpu_to_scr(SCRIPTA_BA(np, resel_tag));
  4265. #endif
  4266. #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
  4267. cp->tags_si = lp->tags_si;
  4268. ++lp->tags_sum[cp->tags_si];
  4269. ++lp->tags_since;
  4270. #endif
  4271. }
  4272. else
  4273. goto out_free;
  4274. }
  4275. /*
  4276. * This command will not be tagged.
  4277. * If we already have either a tagged or untagged
  4278. * one, refuse to overlap this untagged one.
  4279. */
  4280. else {
  4281. /*
  4282. * Debugging purpose.
  4283. */
  4284. #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
  4285. if (lp->busy_itl != 0 || lp->busy_itlq != 0)
  4286. goto out_free;
  4287. #endif
  4288. /*
  4289. * Count this nexus for this LUN.
  4290. * Set up the CCB bus address for reselection.
  4291. * Toggle reselect path to untagged.
  4292. */
  4293. ++lp->busy_itl;
  4294. #ifndef SYM_OPT_HANDLE_DEVICE_QUEUEING
  4295. if (lp->busy_itl == 1) {
  4296. lp->head.itl_task_sa = cpu_to_scr(cp->ccb_ba);
  4297. lp->head.resel_sa =
  4298. cpu_to_scr(SCRIPTA_BA(np, resel_no_tag));
  4299. }
  4300. else
  4301. goto out_free;
  4302. #endif
  4303. }
  4304. }
  4305. /*
  4306. * Put the CCB into the busy queue.
  4307. */
  4308. sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
  4309. #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
  4310. if (lp) {
  4311. sym_remque(&cp->link2_ccbq);
  4312. sym_insque_tail(&cp->link2_ccbq, &lp->waiting_ccbq);
  4313. }
  4314. #endif
  4315. cp->to_abort = 0;
  4316. cp->odd_byte_adjustment = 0;
  4317. cp->tag = tag;
  4318. cp->order = tag_order;
  4319. cp->target = tn;
  4320. cp->lun = ln;
  4321. if (DEBUG_FLAGS & DEBUG_TAGS) {
  4322. sym_print_addr(cmd, "ccb @%p using tag %d.\n", cp, tag);
  4323. }
  4324. out:
  4325. return cp;
  4326. out_free:
  4327. sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
  4328. return NULL;
  4329. }
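/*
 * The tag bookkeeping above (cb_tags[], ia_tag, if_tag, busy_itlq)
 * is a simple circular free list of tag values. A condensed sketch of
 * the allocate/free pair on a stand-alone ring; it is illustrative
 * only and the names below are assumptions of this note, hence #if 0.
 */
#if 0
struct tag_ring {
	unsigned char tags[SYM_CONF_MAX_TASK];	/* preloaded with 0..N-1 */
	int ia;		/* next slot to allocate from */
	int ifr;	/* next slot to free into */
	int busy;	/* tags currently in use */
};

static int tag_alloc(struct tag_ring *r)
{
	int tag;

	if (r->busy >= SYM_CONF_MAX_TASK)
		return -1;			/* all tags in use */
	tag = r->tags[r->ia];
	if (++r->ia == SYM_CONF_MAX_TASK)
		r->ia = 0;
	r->busy++;
	return tag;
}

static void tag_free(struct tag_ring *r, unsigned char tag)
{
	r->tags[r->ifr] = tag;			/* recycle the value */
	if (++r->ifr == SYM_CONF_MAX_TASK)
		r->ifr = 0;
	r->busy--;
}
#endif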
  4330. /*
  4331. * Release one control block
  4332. */
  4333. void sym_free_ccb (struct sym_hcb *np, struct sym_ccb *cp)
  4334. {
  4335. struct sym_tcb *tp = &np->target[cp->target];
  4336. struct sym_lcb *lp = sym_lp(tp, cp->lun);
  4337. if (DEBUG_FLAGS & DEBUG_TAGS) {
  4338. sym_print_addr(cp->cmd, "ccb @%p freeing tag %d.\n",
  4339. cp, cp->tag);
  4340. }
  4341. /*
  4342. * If LCB available,
  4343. */
  4344. if (lp) {
  4345. /*
 * If tagged, release the tag and set the reselect path.
  4347. */
  4348. if (cp->tag != NO_TAG) {
  4349. #ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
  4350. --lp->tags_sum[cp->tags_si];
  4351. #endif
  4352. /*
  4353. * Free the tag value.
  4354. */
  4355. lp->cb_tags[lp->if_tag] = cp->tag;
  4356. if (++lp->if_tag == SYM_CONF_MAX_TASK)
  4357. lp->if_tag = 0;
  4358. /*
  4359. * Make the reselect path invalid,
  4360. * and uncount this CCB.
  4361. */
  4362. lp->itlq_tbl[cp->tag] = cpu_to_scr(np->bad_itlq_ba);
  4363. --lp->busy_itlq;
  4364. } else { /* Untagged */
  4365. /*
  4366. * Make the reselect path invalid,
  4367. * and uncount this CCB.
  4368. */
  4369. lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);
  4370. --lp->busy_itl;
  4371. }
  4372. /*
  4373. * If no JOB active, make the LUN reselect path invalid.
  4374. */
  4375. if (lp->busy_itlq == 0 && lp->busy_itl == 0)
  4376. lp->head.resel_sa =
  4377. cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
  4378. }
  4379. /*
  4380. * We donnot queue more than 1 ccb per target
  4381. * with negotiation at any time. If this ccb was
  4382. * used for negotiation, clear this info in the tcb.
  4383. */
  4384. if (cp == tp->nego_cp)
  4385. tp->nego_cp = NULL;
  4386. #ifdef SYM_CONF_IARB_SUPPORT
  4387. /*
  4388. * If we just complete the last queued CCB,
  4389. * clear this info that is no longer relevant.
  4390. */
  4391. if (cp == np->last_cp)
  4392. np->last_cp = 0;
  4393. #endif
  4394. /*
  4395. * Make this CCB available.
  4396. */
  4397. cp->cmd = NULL;
  4398. cp->host_status = HS_IDLE;
  4399. sym_remque(&cp->link_ccbq);
  4400. sym_insque_head(&cp->link_ccbq, &np->free_ccbq);
  4401. #ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
  4402. if (lp) {
  4403. sym_remque(&cp->link2_ccbq);
  4404. sym_insque_tail(&cp->link2_ccbq, &np->dummy_ccbq);
  4405. if (cp->started) {
  4406. if (cp->tag != NO_TAG)
  4407. --lp->started_tags;
  4408. else
  4409. --lp->started_no_tag;
  4410. }
  4411. }
  4412. cp->started = 0;
  4413. #endif
  4414. }

/*
 * Allocate a CCB from memory and initialize its fixed part.
 */
static struct sym_ccb *sym_alloc_ccb(struct sym_hcb *np)
{
	struct sym_ccb *cp = NULL;
	int hcode;

	/*
	 * Prevent allocating more CCBs than we can
	 * queue to the controller.
	 */
	if (np->actccbs >= SYM_CONF_MAX_START)
		return NULL;

	/*
	 * Allocate memory for this CCB.
	 */
	cp = sym_calloc_dma(sizeof(struct sym_ccb), "CCB");
	if (!cp)
		goto out_free;

	/*
	 * Count it.
	 */
	np->actccbs++;

	/*
	 * Compute the bus address of this ccb.
	 */
	cp->ccb_ba = vtobus(cp);

	/*
	 * Insert this ccb into the hashed list.
	 */
	hcode = CCB_HASH_CODE(cp->ccb_ba);
	cp->link_ccbh = np->ccbh[hcode];
	np->ccbh[hcode] = cp;

	/*
	 * Initialize the start and restart actions.
	 */
	cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, idle));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));

	/*
	 * Initialize some other fields.
	 */
	cp->phys.smsg_ext.addr = cpu_to_scr(HCB_BA(np, msgin[2]));

	/*
	 * Chain into the free ccb queue.
	 */
	sym_insque_head(&cp->link_ccbq, &np->free_ccbq);

	/*
	 * Chain into optional lists.
	 */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	sym_insque_head(&cp->link2_ccbq, &np->dummy_ccbq);
#endif
	return cp;
out_free:
	if (cp)
		sym_mfree_dma(cp, sizeof(*cp), "CCB");
	return NULL;
}

/*
 * Look up a CCB from a DSA value.
 */
static struct sym_ccb *sym_ccb_from_dsa(struct sym_hcb *np, u32 dsa)
{
	int hcode;
	struct sym_ccb *cp;

	hcode = CCB_HASH_CODE(dsa);
	cp = np->ccbh[hcode];
	while (cp) {
		if (cp->ccb_ba == dsa)
			break;
		cp = cp->link_ccbh;
	}

	return cp;
}
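
/*
 * Usage sketch (illustrative only, not taken verbatim from this driver):
 * when the chip stops or reselects, the DSA register holds the bus address
 * of the active CCB, so interrupt handling can recover it with something
 * like:
 *
 *	cp = sym_ccb_from_dsa(np, INL(np, nc_dsa));
 *
 * A NULL result means the DSA value did not match any allocated CCB.
 */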

/*
 * Target control block initialisation.
 * Nothing important to do at the moment.
 */
static void sym_init_tcb (struct sym_hcb *np, u_char tn)
{
#if 0	/* Hmmm... this checking looks paranoid. */
	/*
	 * Check some alignments required by the chip.
	 */
	assert (((offsetof(struct sym_reg, nc_sxfer) ^
		  offsetof(struct sym_tcb, head.sval)) &3) == 0);
	assert (((offsetof(struct sym_reg, nc_scntl3) ^
		  offsetof(struct sym_tcb, head.wval)) &3) == 0);
#endif
}

/*
 * Lun control block allocation and initialization.
 */
struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = NULL;

	/*
	 * Initialize the target control block if not done yet.
	 */
	sym_init_tcb (np, tn);

	/*
	 * Allocate the LCB bus address array.
	 * Compute the bus address of this table.
	 */
	if (ln && !tp->luntbl) {
		int i;

		tp->luntbl = sym_calloc_dma(256, "LUNTBL");
		if (!tp->luntbl)
			goto fail;
		for (i = 0 ; i < 64 ; i++)
			tp->luntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));
		tp->head.luntbl_sa = cpu_to_scr(vtobus(tp->luntbl));
	}

	/*
	 * Allocate the table of pointers for LUN(s) > 0, if needed.
	 */
	if (ln && !tp->lunmp) {
		tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
				GFP_ATOMIC);
		if (!tp->lunmp)
			goto fail;
	}

	/*
	 * Allocate the lcb.
	 * Make it available to the chip.
	 */
	lp = sym_calloc_dma(sizeof(struct sym_lcb), "LCB");
	if (!lp)
		goto fail;
	if (ln) {
		tp->lunmp[ln] = lp;
		tp->luntbl[ln] = cpu_to_scr(vtobus(lp));
	}
	else {
		tp->lun0p = lp;
		tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
	}
	tp->nlcb++;

	/*
	 * Let the itl task point to error handling.
	 */
	lp->head.itl_task_sa = cpu_to_scr(np->bad_itl_ba);

	/*
	 * Set the reselect pattern to our default. :)
	 */
	lp->head.resel_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));

	/*
	 * Set user capabilities.
	 */
	lp->user_flags = tp->usrflags & (SYM_DISC_ENABLED | SYM_TAGS_ENABLED);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * Initialize device queueing.
	 */
	sym_que_init(&lp->waiting_ccbq);
	sym_que_init(&lp->started_ccbq);
	lp->started_max = SYM_CONF_MAX_TASK;
	lp->started_limit = SYM_CONF_MAX_TASK;
#endif

fail:
	return lp;
}

/*
 * Allocate LCB resources for tagged command queuing.
 */
static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);
	int i;

	/*
	 * Allocate the task table and the tag allocation
	 * circular buffer. We want both or none.
	 */
	lp->itlq_tbl = sym_calloc_dma(SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
	if (!lp->itlq_tbl)
		goto fail;
	lp->cb_tags = kcalloc(SYM_CONF_MAX_TASK, 1, GFP_ATOMIC);
	if (!lp->cb_tags) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		lp->itlq_tbl = NULL;
		goto fail;
	}

	/*
	 * Initialize the task table with invalid entries.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->itlq_tbl[i] = cpu_to_scr(np->notask_ba);

	/*
	 * Fill up the tag buffer with tag numbers.
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TASK ; i++)
		lp->cb_tags[i] = i;

	/*
	 * Make the task table available to SCRIPTS,
	 * and accept tagged commands now.
	 */
	lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));

	return;
fail:
	return;
}
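
/*
 * Illustration (not part of the original source): with, say,
 * SYM_CONF_MAX_TASK == 4 the buffer starts out as cb_tags[] = {0,1,2,3}.
 * sym_get_ccb() hands out the tag at cb_tags[ia_tag] and sym_free_ccb()
 * returns a freed tag at cb_tags[if_tag]; both indices wrap around modulo
 * SYM_CONF_MAX_TASK, so the buffer behaves as a simple FIFO of free tag
 * numbers.
 */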

/*
 * Lun control block deallocation. Returns the number of valid remaining LCBs
 * for the target.
 */
int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
{
	struct sym_tcb *tp = &np->target[tn];
	struct sym_lcb *lp = sym_lp(tp, ln);

	tp->nlcb--;

	if (ln) {
		if (!tp->nlcb) {
			kfree(tp->lunmp);
			sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
			tp->lunmp = NULL;
			tp->luntbl = NULL;
			tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
		} else {
			tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
			tp->lunmp[ln] = NULL;
		}
	} else {
		tp->lun0p = NULL;
		tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
	}

	if (lp->itlq_tbl) {
		sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL");
		kfree(lp->cb_tags);
	}

	sym_mfree_dma(lp, sizeof(*lp), "LCB");

	return tp->nlcb;
}

/*
 * Queue a SCSI IO to the controller.
 */
int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, struct sym_ccb *cp)
{
	struct scsi_device *sdev = cmd->device;
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	u_char *msgptr;
	u_int msglen;
	int can_disconnect;

	/*
	 * Keep track of the IO in our CCB.
	 */
	cp->cmd = cmd;

	/*
	 * Retrieve the target descriptor.
	 */
	tp = &np->target[cp->target];

	/*
	 * Retrieve the lun descriptor.
	 */
	lp = sym_lp(tp, sdev->lun);

	can_disconnect = (cp->tag != NO_TAG) ||
		(lp && (lp->curr_flags & SYM_DISC_ENABLED));

	msgptr = cp->scsi_smsg;
	msglen = 0;
	msgptr[msglen++] = IDENTIFY(can_disconnect, sdev->lun);

	/*
	 * Build the tag message if present.
	 */
	if (cp->tag != NO_TAG) {
		u_char order = cp->order;

		switch(order) {
		case M_ORDERED_TAG:
			break;
		case M_HEAD_TAG:
			break;
		default:
			order = M_SIMPLE_TAG;
		}
#ifdef SYM_OPT_LIMIT_COMMAND_REORDERING
		/*
		 * Avoid too much reordering of SCSI commands.
		 * The algorithm tries to prevent completion of any
		 * tagged command from being delayed by more
		 * than 3 times the max number of queued commands.
		 */
		if (lp && lp->tags_since > 3*SYM_CONF_MAX_TAG) {
			lp->tags_si = !(lp->tags_si);
			if (lp->tags_sum[lp->tags_si]) {
				order = M_ORDERED_TAG;
				if ((DEBUG_FLAGS & DEBUG_TAGS)||sym_verbose>1) {
					sym_print_addr(cmd,
						"ordered tag forced.\n");
				}
			}
			lp->tags_since = 0;
		}
#endif
		msgptr[msglen++] = order;

		/*
		 * For fewer than 128 tags, actual tags are numbered
		 * 1,3,5,...,2*MAXTAGS+1, since we may have to deal
		 * with devices that have problems with #TAG 0 or too
		 * large #TAG numbers. For more tags (up to 256),
		 * we use our tag number directly.
		 */
#if SYM_CONF_MAX_TASK > (512/4)
		msgptr[msglen++] = cp->tag;
#else
		msgptr[msglen++] = (cp->tag << 1) + 1;
#endif
	}
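
	/*
	 * Worked example (hypothetical values, not from the original source):
	 * a simple-tagged command to LUN 2 with internal tag 5 and
	 * disconnection allowed would start its message as
	 *	IDENTIFY(1, 2)   -> 0xc2
	 *	M_SIMPLE_TAG     -> 0x20
	 *	(5 << 1) + 1     -> 0x0b	(default SYM_CONF_MAX_TASK)
	 * with an optional negotiation message appended below.
	 */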

	/*
	 * Build a negotiation message if needed.
	 * (nego_status is filled by sym_prepare_nego())
	 *
	 * Always negotiate on INQUIRY and REQUEST SENSE.
	 */
	cp->nego_status = 0;
	if ((tp->tgoal.check_nego ||
	     cmd->cmnd[0] == INQUIRY || cmd->cmnd[0] == REQUEST_SENSE) &&
	    !tp->nego_cp && lp) {
		msglen += sym_prepare_nego(np, cp, msgptr + msglen);
	}

	/*
	 * Start queue.
	 */
	cp->phys.head.go.start = cpu_to_scr(SCRIPTA_BA(np, select));
	cp->phys.head.go.restart = cpu_to_scr(SCRIPTA_BA(np, resel_dsa));

	/*
	 * select
	 */
	cp->phys.select.sel_id = cp->target;
	cp->phys.select.sel_scntl3 = tp->head.wval;
	cp->phys.select.sel_sxfer = tp->head.sval;
	cp->phys.select.sel_scntl4 = tp->head.uval;

	/*
	 * message
	 */
	cp->phys.smsg.addr = CCB_BA(cp, scsi_smsg);
	cp->phys.smsg.size = cpu_to_scr(msglen);

	/*
	 * status
	 */
	cp->host_xflags = 0;
	cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
	cp->ssss_status = S_ILLEGAL;
	cp->xerr_status = 0;
	cp->host_flags = 0;
	cp->extra_bytes = 0;

	/*
	 * extreme data pointer.
	 * shall be positive, so -1 is lower than lowest. :)
	 */
	cp->ext_sg = -1;
	cp->ext_ofs = 0;

	/*
	 * Build the CDB and DATA descriptor block
	 * and start the IO.
	 */
	return sym_setup_data_and_start(np, cmd, cp);
}

/*
 * Reset a SCSI target (all LUNs of this target).
 */
int sym_reset_scsi_target(struct sym_hcb *np, int target)
{
	struct sym_tcb *tp;

	if (target == np->myaddr || (u_int)target >= SYM_CONF_MAX_TARGET)
		return -1;

	tp = &np->target[target];
	tp->to_reset = 1;

	np->istat_sem = SEM;
	OUTB(np, nc_istat, SIGP|SEM);

	return 0;
}

/*
 * Abort a SCSI IO.
 */
static int sym_abort_ccb(struct sym_hcb *np, struct sym_ccb *cp, int timed_out)
{
	/*
	 * Check that the IO is active.
	 */
	if (!cp || !cp->host_status || cp->host_status == HS_WAIT)
		return -1;

	/*
	 * If a previous abort didn't succeed in time,
	 * perform a BUS reset.
	 */
	if (cp->to_abort) {
		sym_reset_scsi_bus(np, 1);
		return 0;
	}

	/*
	 * Mark the CCB for abort and allow time for the abort to complete.
	 */
	cp->to_abort = timed_out ? 2 : 1;

	/*
	 * Tell the SCRIPTS processor to stop and synchronize with us.
	 */
	np->istat_sem = SEM;
	OUTB(np, nc_istat, SIGP|SEM);
	return 0;
}

int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *cmd, int timed_out)
{
	struct sym_ccb *cp;
	SYM_QUEHEAD *qp;

	/*
	 * Look up our CCB control block.
	 */
	cp = NULL;
	FOR_EACH_QUEUED_ELEMENT(&np->busy_ccbq, qp) {
		struct sym_ccb *cp2 = sym_que_entry(qp, struct sym_ccb, link_ccbq);
		if (cp2->cmd == cmd) {
			cp = cp2;
			break;
		}
	}

	return sym_abort_ccb(np, cp, timed_out);
}

/*
 * Complete execution of a SCSI command with extended
 * error, SCSI status error, or having been auto-sensed.
 *
 * The SCRIPTS processor is not running here, so we
 * can safely access IO registers and remove JOBs from
 * the START queue.
 * SCRATCHA is assumed to have been loaded with STARTPOS
 * before the SCRIPTS called the C code.
 */
void sym_complete_error(struct sym_hcb *np, struct sym_ccb *cp)
{
	struct scsi_device *sdev;
	struct scsi_cmnd *cmd;
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	int resid;
	int i;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cmd)
		return;

	cmd = cp->cmd;
	sdev = cmd->device;
	if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_RESULT)) {
		dev_info(&sdev->sdev_gendev, "CCB=%p STAT=%x/%x/%x\n", cp,
			cp->host_status, cp->ssss_status, cp->host_flags);
	}

	/*
	 * Get target and lun pointers.
	 */
	tp = &np->target[cp->target];
	lp = sym_lp(tp, sdev->lun);

	/*
	 * Check for extended errors.
	 */
	if (cp->xerr_status) {
		if (sym_verbose)
			sym_print_xerr(cmd, cp->xerr_status);
		if (cp->host_status == HS_COMPLETE)
			cp->host_status = HS_COMP_ERR;
	}

	/*
	 * Calculate the residual.
	 */
	resid = sym_compute_residual(np, cp);

	if (!SYM_SETUP_RESIDUAL_SUPPORT) {	/* If user does not want residuals */
		resid = 0;			/* throw them away. :) */
		cp->sv_resid = 0;
	}
#ifdef DEBUG_2_0_X
	if (resid)
		printf("XXXX RESID= %d - 0x%x\n", resid, resid);
#endif

	/*
	 * Dequeue all queued CCBs for that device
	 * not yet started by SCRIPTS.
	 */
	i = (INL(np, nc_scratcha) - np->squeue_ba) / 4;
	i = sym_dequeue_from_squeue(np, i, cp->target, sdev->lun, -1);

	/*
	 * Restart the SCRIPTS processor.
	 */
	OUTL_DSP(np, SCRIPTA_BA(np, start));

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	if (cp->host_status == HS_COMPLETE &&
	    cp->ssss_status == S_QUEUE_FULL) {
		if (!lp || lp->started_tags - i < 2)
			goto weirdness;
		/*
		 * Decrease queue depth as needed.
		 */
		lp->started_max = lp->started_tags - i - 1;
		lp->num_sgood = 0;

		if (sym_verbose >= 2) {
			sym_print_addr(cmd, " queue depth is now %d\n",
					lp->started_max);
		}

		/*
		 * Repair the CCB.
		 */
		cp->host_status = HS_BUSY;
		cp->ssss_status = S_ILLEGAL;

		/*
		 * Let's requeue it to device.
		 */
		sym_set_cam_status(cmd, DID_SOFT_ERROR);
		goto finish;
	}
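
	/*
	 * Worked example (hypothetical numbers, not from the original
	 * source): if 6 tagged commands had been started for the LUN and
	 * 2 of them were dequeued above (i == 2) when QUEUE FULL was
	 * reported, the new depth becomes 6 - 2 - 1 = 3 started commands.
	 */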
weirdness:
#endif
	/*
	 * Build result in CAM ccb.
	 */
	sym_set_cam_result_error(np, cp, resid);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
finish:
#endif
	/*
	 * Add this one to the COMP queue.
	 */
	sym_remque(&cp->link_ccbq);
	sym_insque_head(&cp->link_ccbq, &np->comp_ccbq);

	/*
	 * Complete all those commands with either error
	 * or requeue condition.
	 */
	sym_flush_comp_queue(np, 0);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * Do not start more than 1 command after an error.
	 */
	sym_start_next_ccbs(np, lp, 1);
#endif
}

/*
 * Complete execution of a successful SCSI command.
 *
 * Only successful commands go to the DONE queue,
 * since we need to have the SCRIPTS processor
 * stopped on any error condition.
 * The SCRIPTS processor is running while we are
 * completing successful commands.
 */
void sym_complete_ok (struct sym_hcb *np, struct sym_ccb *cp)
{
	struct sym_tcb *tp;
	struct sym_lcb *lp;
	struct scsi_cmnd *cmd;
	int resid;

	/*
	 * Paranoid check. :)
	 */
	if (!cp || !cp->cmd)
		return;
	assert (cp->host_status == HS_COMPLETE);

	/*
	 * Get user command.
	 */
	cmd = cp->cmd;

	/*
	 * Get target and lun pointers.
	 */
	tp = &np->target[cp->target];
	lp = sym_lp(tp, cp->lun);

	/*
	 * If all the data have been transferred and no
	 * extended error occurred, there is no residual.
	 */
	resid = 0;
	if (cp->phys.head.lastp != cp->goalp)
		resid = sym_compute_residual(np, cp);

	/*
	 * Wrong transfer residuals may be worse than just always
	 * returning zero. User can disable this feature in
	 * sym53c8xx.h. Residual support is enabled by default.
	 */
	if (!SYM_SETUP_RESIDUAL_SUPPORT)
		resid = 0;
#ifdef DEBUG_2_0_X
	if (resid)
		printf("XXXX RESID= %d - 0x%x\n", resid, resid);
#endif

	/*
	 * Build result in CAM ccb.
	 */
	sym_set_cam_result_ok(cp, cmd, resid);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * If the max number of started ccbs has been reduced,
	 * increase it again once 200 good statuses have been received.
	 */
	if (lp && lp->started_max < lp->started_limit) {
		++lp->num_sgood;
		if (lp->num_sgood >= 200) {
			lp->num_sgood = 0;
			++lp->started_max;
			if (sym_verbose >= 2) {
				sym_print_addr(cmd, " queue depth is now %d\n",
						lp->started_max);
			}
		}
	}
#endif
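
	/*
	 * Illustration (hypothetical numbers, not from the original source):
	 * if started_limit is 16 and a QUEUE FULL pushed started_max down
	 * to 3, each run of 200 good completions raises the depth by one
	 * (3 -> 4 -> 5 ...) until started_limit is reached again.
	 */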

	/*
	 * Free our CCB.
	 */
	sym_free_ccb (np, cp);

#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	/*
	 * Requeue a couple of awaiting scsi commands.
	 */
	if (!sym_que_empty(&lp->waiting_ccbq))
		sym_start_next_ccbs(np, lp, 2);
#endif
	/*
	 * Complete the command.
	 */
	sym_xpt_done(np, cmd);
}

/*
 * Soft-attach the controller.
 */
int sym_hcb_attach(struct Scsi_Host *shost, struct sym_fw *fw, struct sym_nvram *nvram)
{
	struct sym_hcb *np = sym_get_hcb(shost);
	int i;

	/*
	 * Get some info about the firmware.
	 */
	np->scripta_sz = fw->a_size;
	np->scriptb_sz = fw->b_size;
	np->scriptz_sz = fw->z_size;
	np->fw_setup = fw->setup;
	np->fw_patch = fw->patch;
	np->fw_name = fw->name;

	/*
	 * Save setting of some IO registers, so we will
	 * be able to probe specific implementations.
	 */
	sym_save_initial_setting (np);

	/*
	 * Reset the chip now, since it has been reported
	 * that SCSI clock calibration may not work properly
	 * if the chip is currently active.
	 */
	sym_chip_reset(np);

	/*
	 * Prepare controller and devices settings, according
	 * to chip features, user set-up and driver set-up.
	 */
	sym_prepare_setting(shost, np, nvram);

	/*
	 * Check the PCI clock frequency.
	 * Must be performed after prepare_setting since it destroys
	 * STEST1 that is used to probe for the clock doubler.
	 */
	i = sym_getpciclock(np);
	if (i > 37000 && !(np->features & FE_66MHZ))
		printf("%s: PCI BUS clock seems too high: %u KHz.\n",
			sym_name(np), i);

	/*
	 * Allocate the start queue.
	 */
	np->squeue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"SQUEUE");
	if (!np->squeue)
		goto attach_failed;
	np->squeue_ba = vtobus(np->squeue);

	/*
	 * Allocate the done queue.
	 */
	np->dqueue = sym_calloc_dma(sizeof(u32)*(MAX_QUEUE*2),"DQUEUE");
	if (!np->dqueue)
		goto attach_failed;
	np->dqueue_ba = vtobus(np->dqueue);

	/*
	 * Allocate the target bus address array.
	 */
	np->targtbl = sym_calloc_dma(256, "TARGTBL");
	if (!np->targtbl)
		goto attach_failed;
	np->targtbl_ba = vtobus(np->targtbl);

	/*
	 * Allocate SCRIPTS areas.
	 */
	np->scripta0 = sym_calloc_dma(np->scripta_sz, "SCRIPTA0");
	np->scriptb0 = sym_calloc_dma(np->scriptb_sz, "SCRIPTB0");
	np->scriptz0 = sym_calloc_dma(np->scriptz_sz, "SCRIPTZ0");
	if (!np->scripta0 || !np->scriptb0 || !np->scriptz0)
		goto attach_failed;

	/*
	 * Allocate the array of lists of CCBs hashed by DSA.
	 */
	np->ccbh = kcalloc(CCB_HASH_SIZE, sizeof(struct sym_ccb **), GFP_KERNEL);
	if (!np->ccbh)
		goto attach_failed;

	/*
	 * Initialize the CCB free and busy queues.
	 */
	sym_que_init(&np->free_ccbq);
	sym_que_init(&np->busy_ccbq);
	sym_que_init(&np->comp_ccbq);

	/*
	 * Initialization for optional handling
	 * of device queueing.
	 */
#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING
	sym_que_init(&np->dummy_ccbq);
#endif
	/*
	 * Allocate some CCB. We need at least ONE.
	 */
	if (!sym_alloc_ccb(np))
		goto attach_failed;

	/*
	 * Calculate BUS addresses where we are going
	 * to load the SCRIPTS.
	 */
	np->scripta_ba = vtobus(np->scripta0);
	np->scriptb_ba = vtobus(np->scriptb0);
	np->scriptz_ba = vtobus(np->scriptz0);

	if (np->ram_ba) {
		np->scripta_ba = np->ram_ba;
		if (np->features & FE_RAM8K) {
			np->scriptb_ba = np->scripta_ba + 4096;
#if 0	/* May get useful for 64 BIT PCI addressing */
			np->scr_ram_seg = cpu_to_scr(np->scripta_ba >> 32);
#endif
		}
	}

	/*
	 * Copy scripts to controller instance.
	 */
	memcpy(np->scripta0, fw->a_base, np->scripta_sz);
	memcpy(np->scriptb0, fw->b_base, np->scriptb_sz);
	memcpy(np->scriptz0, fw->z_base, np->scriptz_sz);

	/*
	 * Setup variable parts in scripts and compute
	 * scripts bus addresses used from the C code.
	 */
	np->fw_setup(np, fw);

	/*
	 * Bind SCRIPTS with physical addresses usable by the
	 * SCRIPTS processor (as seen from the BUS = BUS addresses).
	 */
	sym_fw_bind_script(np, (u32 *) np->scripta0, np->scripta_sz);
	sym_fw_bind_script(np, (u32 *) np->scriptb0, np->scriptb_sz);
	sym_fw_bind_script(np, (u32 *) np->scriptz0, np->scriptz_sz);

#ifdef SYM_CONF_IARB_SUPPORT
	/*
	 * If user wants IARB to be set when we win arbitration
	 * and have other jobs, compute the max number of consecutive
	 * settings of IARB hints before we leave devices a chance to
	 * arbitrate for reselection.
	 */
#ifdef SYM_SETUP_IARB_MAX
	np->iarb_max = SYM_SETUP_IARB_MAX;
#else
	np->iarb_max = 4;
#endif
#endif

	/*
	 * Prepare the idle and invalid task actions.
	 */
	np->idletask.start = cpu_to_scr(SCRIPTA_BA(np, idle));
	np->idletask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
	np->idletask_ba = vtobus(&np->idletask);

	np->notask.start = cpu_to_scr(SCRIPTA_BA(np, idle));
	np->notask.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
	np->notask_ba = vtobus(&np->notask);

	np->bad_itl.start = cpu_to_scr(SCRIPTA_BA(np, idle));
	np->bad_itl.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l));
	np->bad_itl_ba = vtobus(&np->bad_itl);

	np->bad_itlq.start = cpu_to_scr(SCRIPTA_BA(np, idle));
	np->bad_itlq.restart = cpu_to_scr(SCRIPTB_BA(np, bad_i_t_l_q));
	np->bad_itlq_ba = vtobus(&np->bad_itlq);

	/*
	 * Allocate and prepare the lun JUMP table that is used
	 * for a target prior to the probing of devices (bad lun table).
	 * A private table will be allocated for the target on the
	 * first INQUIRY response received.
	 */
	np->badluntbl = sym_calloc_dma(256, "BADLUNTBL");
	if (!np->badluntbl)
		goto attach_failed;

	np->badlun_sa = cpu_to_scr(SCRIPTB_BA(np, resel_bad_lun));
	for (i = 0 ; i < 64 ; i++)	/* 64 luns/target, no less */
		np->badluntbl[i] = cpu_to_scr(vtobus(&np->badlun_sa));

	/*
	 * Prepare the bus address array that contains the bus
	 * address of each target control block.
	 * For now, assume all logical units are wrong. :)
	 */
	for (i = 0 ; i < SYM_CONF_MAX_TARGET ; i++) {
		np->targtbl[i] = cpu_to_scr(vtobus(&np->target[i]));
		np->target[i].head.luntbl_sa =
			cpu_to_scr(vtobus(np->badluntbl));
		np->target[i].head.lun0_sa =
			cpu_to_scr(vtobus(&np->badlun_sa));
	}

	/*
	 * Now check the cache handling of the pci chipset.
	 */
	if (sym_snooptest (np)) {
		printf("%s: CACHE INCORRECTLY CONFIGURED.\n", sym_name(np));
		goto attach_failed;
	}

	/*
	 * Sigh! we are done.
	 */
	return 0;

attach_failed:
	return -ENXIO;
}

/*
 * Free everything that has been allocated for this device.
 */
void sym_hcb_free(struct sym_hcb *np)
{
	SYM_QUEHEAD *qp;
	struct sym_ccb *cp;
	struct sym_tcb *tp;
	int target;

	if (np->scriptz0)
		sym_mfree_dma(np->scriptz0, np->scriptz_sz, "SCRIPTZ0");
	if (np->scriptb0)
		sym_mfree_dma(np->scriptb0, np->scriptb_sz, "SCRIPTB0");
	if (np->scripta0)
		sym_mfree_dma(np->scripta0, np->scripta_sz, "SCRIPTA0");
	if (np->squeue)
		sym_mfree_dma(np->squeue, sizeof(u32)*(MAX_QUEUE*2), "SQUEUE");
	if (np->dqueue)
		sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");

	if (np->actccbs) {
		while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
			cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
			sym_mfree_dma(cp, sizeof(*cp), "CCB");
		}
	}
	kfree(np->ccbh);

	if (np->badluntbl)
		sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");

	for (target = 0; target < SYM_CONF_MAX_TARGET ; target++) {
		tp = &np->target[target];
		if (tp->luntbl)
			sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
#if SYM_CONF_MAX_LUN > 1
		kfree(tp->lunmp);
#endif
	}

	if (np->targtbl)
		sym_mfree_dma(np->targtbl, 256, "TARGTBL");
}