/******************************************************************************
* QLOGIC LINUX SOFTWARE
*
* QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
* Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
* Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
* Copyright (C) 2003-2004 Christoph Hellwig
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
******************************************************************************/
#define QLA1280_VERSION "3.27.1"
/*****************************************************************************
Revision History:
Rev 3.27.1, February 8, 2010, Michael Reed
- Retain firmware image for error recovery.
Rev 3.27, February 10, 2009, Michael Reed
- General code cleanup.
- Improve error recovery.
Rev 3.26, January 16, 2006 Jes Sorensen
- Ditch all < 2.6 support
Rev 3.25.1, February 10, 2005 Christoph Hellwig
- use pci_map_single to map non-S/G requests
- remove qla1280_proc_info
Rev 3.25, September 28, 2004, Christoph Hellwig
- add support for ISP1020/1040
- don't include "scsi.h" anymore for 2.6.x
Rev 3.24.4 June 7, 2004 Christoph Hellwig
- restructure firmware loading, cleanup initialization code
- prepare support for ISP1020/1040 chips
Rev 3.24.3 January 19, 2004, Jes Sorensen
- Handle PCI DMA mask settings correctly
- Correct order of error handling in probe_one, free_irq should not
  be called if request_irq failed
Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
- Big endian fixes (James)
- Remove bogus IOCB content on zero data transfer commands (Andrew)
Rev 3.24.1 January 5, 2004, Jes Sorensen
- Initialize completion queue to avoid OOPS on probe
- Handle interrupts during mailbox testing
Rev 3.24 November 17, 2003, Christoph Hellwig
- use struct list_head for completion queue
- avoid old Scsi_FOO typedefs
- cleanup 2.4 compat glue a bit
- use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
- make initialization for memory mapped vs port I/O more similar
- remove broken pci config space manipulation
- kill more cruft
- this is an almost perfect 2.6 scsi driver now! ;)
Rev 3.23.39 December 17, 2003, Jes Sorensen
- Delete completion queue from srb if mailbox command failed,
  to avoid qla1280_done completing qla1280_error_action's
  obsolete context
- Reduce arguments for qla1280_done
Rev 3.23.38 October 18, 2003, Christoph Hellwig
- Convert to new-style hotpluggable driver for 2.6
- Fix missing scsi_unregister/scsi_host_put on HBA removal
- Kill some more cruft
Rev 3.23.37 October 1, 2003, Jes Sorensen
- Make MMIO depend on CONFIG_X86_VISWS instead of yet another
  random CONFIG option
- Clean up locking in probe path
Rev 3.23.36 October 1, 2003, Christoph Hellwig
- queuecommand only ever receives new commands - clear flags
- Reintegrate lost fixes from Linux 2.5
Rev 3.23.35 August 14, 2003, Jes Sorensen
- Build against 2.6
Rev 3.23.34 July 23, 2003, Jes Sorensen
- Remove pointless TRUE/FALSE macros
- Clean up vchan handling
Rev 3.23.33 July 3, 2003, Jes Sorensen
- Don't define register access macros before define determining MMIO.
  This just happened to work out on ia64 but not elsewhere.
- Don't try and read from the card while it is in reset as
  it won't respond and causes an MCA
Rev 3.23.32 June 23, 2003, Jes Sorensen
- Basic support for boot time arguments
Rev 3.23.31 June 8, 2003, Jes Sorensen
- Reduce boot time messages
Rev 3.23.30 June 6, 2003, Jes Sorensen
- Do not enable sync/wide/ppr before it has been determined
  that the target device actually supports it
- Enable DMA arbitration for multi channel controllers
Rev 3.23.29 June 3, 2003, Jes Sorensen
- Port to 2.5.69
Rev 3.23.28 June 3, 2003, Jes Sorensen
- Eliminate duplicate marker commands on bus resets
- Handle outstanding commands appropriately on bus/device resets
Rev 3.23.27 May 28, 2003, Jes Sorensen
- Remove bogus input queue code, let the Linux SCSI layer do the work
- Clean up NVRAM handling, only read it once from the card
- Add a number of missing default nvram parameters
Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
- Use completion queue for mailbox commands instead of busy wait
Rev 3.23.25 Beta May 27, 2003, James Bottomley
- Migrate to use new error handling code
Rev 3.23.24 Beta May 21, 2003, James Bottomley
- Big endian support
- Cleanup data direction code
Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
- Switch to using MMIO instead of PIO
Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
- Fix PCI parity problem with 12160 during reset.
Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
- Use pci_map_page()/pci_unmap_page() instead of map_single version.
Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
- Remove < 2.4.x support
- Introduce HOST_LOCK to make the spin lock changes portable.
- Remove a bunch of idiotic and unnecessary typedef's
- Kill all leftovers of target-mode support which never worked anyway
Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
- Do qla1280_pci_config() before calling request_irq() and
  request_region()
- Use pci_dma_hi32() to handle upper word of DMA addresses instead
  of large shifts
- Hand correct arguments to free_irq() in case of failure
Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
- Run source through Lindent and clean up the output
Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
- Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
- Rely on mailbox commands generating interrupts - do not
  run qla1280_isr() from ql1280_mailbox_command()
- Remove device_reg_t
- Integrate ql12160_set_target_parameters() with 1280 version
- Make qla1280_setup() non static
- Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
  sent to the card - this command pauses the firmware!!!
Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
- Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
- Remove a pile of pointless and confusing (srb_t **) and
  (scsi_lu_t *) typecasts
- Explicitly mark that we do not use the new error handling (for now)
- Remove scsi_qla_host_t and use 'struct' instead
- Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
  pci_64bit_slot flags which weren't used for anything anyway
- Grab host->host_lock while calling qla1280_isr() from abort()
- Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
  do not need to save/restore flags in the interrupt handler
- Enable interrupts early (before any mailbox access) in preparation
  for cleaning up the mailbox handling
Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
- Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
  it with proper use of dprintk().
- Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
  a debug level argument to determine if data is to be printed
- Add KERN_* info to printk()
Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
- Significant cosmetic cleanups
- Change debug code to use dprintk() and remove #if mess
Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
- More cosmetic cleanups, fix places treating return as function
- use cpu_relax() in qla1280_debounce_register()
Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
- Make it compile under 2.5.5
Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
- Do not typecast short * to long * in QL1280BoardTbl, this
  broke miserably on big endian boxes
Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
- Remove pre 2.2 hack for checking for reentrance in interrupt handler
- Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
  unsigned int to match the types from struct scsi_cmnd
Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
- Remove bogus timer_t typedef from qla1280.h
- Remove obsolete pre 2.2 PCI setup code, use proper #define's
  for PCI_ values, call pci_set_master()
- Fix memleak of qla1280_buffer on module unload
- Only compile module parsing code #ifdef MODULE - should be
  changed to use individual MODULE_PARM's later
- Remove dummy_buffer that was never modified nor printed
- ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
  #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
- Remove \r from print statements, this is Linux, not DOS
- Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
  dummy macros
- Remove C++ compile hack in header file as Linux drivers are not
  supposed to be compiled as C++
- Kill MS_64BITS macro to make the code more readable
- Remove unnecessary flags.in_interrupts bit
Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
- Don't check for set flags on q->q_flag one by one in qla1280_next()
- Check whether the interrupt was generated by the QLA1280 before
  doing any processing
- qla1280_status_entry(): Only zero out part of sense_buffer that
  is not being copied into
- Remove more superfluous typecasts
- qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
- Don't walk the entire list in qla1280_putq_t() just to directly
  grab the pointer to the last element afterwards
Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
- Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
- Set dev->max_sectors to 1024
Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
- Provide compat macros for pci_enable_device(), pci_find_subsys()
  and scsi_set_pci_device()
- Call scsi_set_pci_device() for all devices
- Reduce size of kernel version dependent device probe code
- Move duplicate probe/init code to separate function
- Handle error if qla1280_mem_alloc() fails
- Kill OFFSET() macro and use Linux's PCI definitions instead
- Kill private structure defining PCI config space (struct config_reg)
- Only allocate I/O port region if not in MMIO mode
- Remove duplicate (unused) sanity check of size of srb_t
Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
- Change home-brew memset() implementations to use memset()
- Remove all references to COMTRACE() - accessing a PC's COM2 serial
  port directly is not legal under Linux.
Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
- Remove pre 2.2 kernel support
- clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
- Fix MMIO access to use readl/writel instead of directly
  dereferencing pointers
- Nuke MSDOS debugging code
- Change true/false data types to int from uint8_t
- Use int for counters instead of uint8_t etc.
- Clean up size & byte order conversion macro usage
Rev 3.23 Beta January 11, 2001 BN Qlogic
- Added check of device_id when handling non
  QLA12160s during detect().
Rev 3.22 Beta January 5, 2001 BN Qlogic
- Changed queue_task() to schedule_task()
  for kernels 2.4.0 and higher.
  Note: 2.4.0-testxx kernels released prior to
  the actual 2.4.0 kernel release on January 2001
  will get compile/link errors with schedule_task().
  Please update your kernel to released 2.4.0 level,
  or comment lines in this file flagged with 3.22
  to resolve compile/link error of schedule_task().
- Added -DCONFIG_SMP in addition to -D__SMP__
  in Makefile for 2.4.0 builds of driver as module.
Rev 3.21 Beta January 4, 2001 BN Qlogic
- Changed criteria of 64/32 Bit mode of HBA
  operation according to BITS_PER_LONG rather
  than HBA's NVRAM setting of >4Gig memory bit;
  so that the HBA auto-configures without the need
  to setup each system individually.
Rev 3.20 Beta December 5, 2000 BN Qlogic
- Added priority handling to IA-64 onboard SCSI
  ISP12160 chip for kernels greater than 2.3.18.
- Added irqrestore for qla1280_intr_handler.
- Enabled /proc/scsi/qla1280 interface.
- Clear /proc/scsi/qla1280 counters in detect().
Rev 3.19 Beta October 13, 2000 BN Qlogic
- Declare driver_template for new kernel
  (2.4.0 and greater) scsi initialization scheme.
- Update /proc/scsi entry for 2.3.18 kernels and
  above as qla1280
Rev 3.18 Beta October 10, 2000 BN Qlogic
- Changed scan order of adapters to map
  the QLA12160 followed by the QLA1280.
Rev 3.17 Beta September 18, 2000 BN Qlogic
- Removed warnings for 32 bit 2.4.x compiles
- Corrected declared size for request and response
  DMA addresses that are kept in each ha
Rev. 3.16 Beta August 25, 2000 BN Qlogic
- Corrected 64 bit addressing issue on IA-64
  where the upper 32 bits were not properly
  passed to the RISC engine.
Rev. 3.15 Beta August 22, 2000 BN Qlogic
- Modified qla1280_setup_chip to properly load
  ISP firmware for greater than 4 Gig memory on IA-64
Rev. 3.14 Beta August 16, 2000 BN Qlogic
- Added setting of dma_mask to full 64 bit
  if flags.enable_64bit_addressing is set in NVRAM
Rev. 3.13 Beta August 16, 2000 BN Qlogic
- Use new PCI DMA mapping APIs for 2.4.x kernel
Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
- Added check of pci_enable_device to detect() for 2.3.x
- Use pci_resource_start() instead of
  pdev->resource[0].start in detect() for 2.3.x
- Updated driver version
Rev. 3.11 July 14, 2000 BN Qlogic
- Updated SCSI Firmware to following versions:
  qla1x80: 8.13.08
  qla1x160: 10.04.08
- Updated driver version to 3.11
Rev. 3.10 June 23, 2000 BN Qlogic
- Added filtering of AMI SubSys Vendor ID devices
Rev. 3.9
- DEBUG_QLA1280 undefined and new version BN Qlogic
Rev. 3.08b May 9, 2000 MD Dell
- Added logic to check against AMI subsystem vendor ID
Rev. 3.08 May 4, 2000 DG Qlogic
- Added logic to check for PCI subsystem ID.
Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
- Updated SCSI Firmware to following versions:
  qla12160: 10.01.19
  qla1280: 8.09.00
Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
- Internal revision; not released
Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
- Edit correction for virt_to_bus and PROC.
Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
- Merge changes from ia64 port.
Rev. 3.03 Mar 28, 2000 BN Qlogic
- Increase version to reflect new code drop with compile fix
  of issue with inclusion of linux/spinlock for 2.3 kernels
Rev. 3.02 Mar 15, 2000 BN Qlogic
- Merge qla1280_proc_info from 2.10 code base
Rev. 3.01 Feb 10, 2000 BN Qlogic
- Corrected code to compile on a 2.2.x kernel.
Rev. 3.00 Jan 17, 2000 DG Qlogic
- Added 64-bit support.
Rev. 2.07 Nov 9, 1999 DG Qlogic
- Added new routine to set target parameters for ISP12160.
Rev. 2.06 Sept 10, 1999 DG Qlogic
- Added support for ISP12160 Ultra 3 chip.
Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
- Modified code to remove errors generated when compiling with
  Cygnus IA64 Compiler.
- Changed conversion of pointers to unsigned longs instead of integers.
- Changed type of I/O port variables from uint32_t to unsigned long.
- Modified OFFSET macro to work with 64-bit as well as 32-bit.
- Changed sprintf and printk format specifiers for pointers to %p.
- Changed some int to long type casts where needed in sprintf & printk.
- Added l modifiers to sprintf and printk format specifiers for longs.
- Removed unused local variables.
Rev. 1.20 June 8, 1999 DG, Qlogic
Changes to support RedHat release 6.0 (kernel 2.2.5).
- Added SCSI exclusive access lock (io_request_lock) when accessing
  the adapter.
- Added changes for the new LINUX interface template. Some new error
  handling routines have been added to the template, but for now we
  will use the old ones.
- Initial Beta Release.
*****************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/system.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
#include <asm/sn/io.h>
#endif
/*
 * Compile time Options:
 *     0 - Disable and 1 - Enable
 */
#define DEBUG_QLA1280_INTR	0
#define DEBUG_PRINT_NVRAM	0
#define DEBUG_QLA1280		0

/*
 * The SGI VISWS is broken and doesn't support MMIO ;-(
 */
#ifdef CONFIG_X86_VISWS
#define MEMORY_MAPPED_IO	0
#else
#define MEMORY_MAPPED_IO	1
#endif

#include "qla1280.h"

#ifndef BITS_PER_LONG
#error "BITS_PER_LONG not defined!"
#endif
#if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
#define QLA_64BIT_PTR	1
#endif

#ifdef QLA_64BIT_PTR
#define pci_dma_hi32(a)	((a >> 16) >> 16)
#else
#define pci_dma_hi32(a)	0
#endif
#define pci_dma_lo32(a)	(a & 0xffffffff)
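
/*
 * Usage sketch (illustrative only; the IOCB field names below are
 * hypothetical): the two halves of a bus address returned by the DMA
 * mapping API are written separately into a request IOCB, e.g.
 *
 *	dma_addr_t dma = sg_dma_address(sg);
 *	pkt->dseg_address    = cpu_to_le32(pci_dma_lo32(dma));
 *	pkt->dseg_address_hi = cpu_to_le32(pci_dma_hi32(dma));
 *
 * On 32-bit builds without QLA_64BIT_PTR the high half is simply 0.
 */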
#define NVRAM_DELAY()	udelay(500)	/* 500 microseconds */
#if defined(__ia64__) && !defined(ia64_platform_is)
#define ia64_platform_is(foo)	(!strcmp(foo, platform_name))
#endif
#define IS_ISP1040(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
#define IS_ISP1x40(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
#define IS_ISP1x160(ha)	(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
			 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)

static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
static void qla1280_remove_one(struct pci_dev *);

/*
 * QLogic Driver Support Function Prototypes.
 */
static void qla1280_done(struct scsi_qla_host *);
static int qla1280_get_token(char *);
static int qla1280_setup(char *s) __init;

/*
 * QLogic ISP1280 Hardware Support Function Prototypes.
 */
static int qla1280_load_firmware(struct scsi_qla_host *);
static int qla1280_init_rings(struct scsi_qla_host *);
static int qla1280_nvram_config(struct scsi_qla_host *);
static int qla1280_mailbox_command(struct scsi_qla_host *,
				   uint8_t, uint16_t *);
static int qla1280_bus_reset(struct scsi_qla_host *, int);
static int qla1280_device_reset(struct scsi_qla_host *, int, int);
static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
static int qla1280_abort_isp(struct scsi_qla_host *);
#ifdef QLA_64BIT_PTR
static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
#else
static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
#endif
static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
static void qla1280_poll(struct scsi_qla_host *);
static void qla1280_reset_adapter(struct scsi_qla_host *);
static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
static void qla1280_isp_cmd(struct scsi_qla_host *);
static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
static void qla1280_rst_aen(struct scsi_qla_host *);
static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
				 struct list_head *);
static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
				struct list_head *);
static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
static request_t *qla1280_req_pkt(struct scsi_qla_host *);
static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
					   unsigned int);
static void qla1280_get_target_parameters(struct scsi_qla_host *,
					  struct scsi_device *);
static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);

static struct qla_driver_setup driver_setup;

/*
 * convert scsi data direction to request_t control flags
 */
static inline uint16_t
qla1280_data_direction(struct scsi_cmnd *cmnd)
{
	switch (cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		return BIT_5;
	case DMA_TO_DEVICE:
		return BIT_6;
	case DMA_BIDIRECTIONAL:
		return BIT_5 | BIT_6;
	/*
	 * We could BUG() on default here if one of the four cases aren't
	 * met, but then again if we receive something like that from the
	 * SCSI layer we have more serious problems. This shuts up GCC.
	 */
	case DMA_NONE:
	default:
		return 0;
	}
}
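
/*
 * Sketch of typical use (the real callers are the start_scsi routines
 * further down in this file; the control_flags field name here is
 * hypothetical):
 *
 *	pkt->control_flags |= cpu_to_le16(qla1280_data_direction(cmd));
 */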
#if DEBUG_QLA1280
static void __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd);
static void __qla1280_dump_buffer(char *, int);
#endif
/*
 * insmod needs to find the variable and make it point to something
 */
#ifdef MODULE
static char *qla1280;

/* insmod qla1280 qla1280=verbose */
module_param(qla1280, charp, 0);
#else
__setup("qla1280=", qla1280_setup);
#endif
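
/*
 * Usage sketch (assumed option string): the same option is accepted as a
 * module parameter or, for a built-in driver, on the kernel command line,
 * and is parsed further down by qla1280_setup()/qla1280_get_token(), e.g.
 *
 *	modprobe qla1280 qla1280=verbose
 *	(boot command line)  qla1280=verbose
 */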
/*
 * We use the scsi_pointer structure that's included with each scsi_command
 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
 * bigger than a scsi_pointer.
 */
#define CMD_SP(Cmnd)		&Cmnd->SCp
#define CMD_CDBLEN(Cmnd)	Cmnd->cmd_len
#define CMD_CDBP(Cmnd)		Cmnd->cmnd
#define CMD_SNSP(Cmnd)		Cmnd->sense_buffer
#define CMD_SNSLEN(Cmnd)	SCSI_SENSE_BUFFERSIZE
#define CMD_RESULT(Cmnd)	Cmnd->result
#define CMD_HANDLE(Cmnd)	Cmnd->host_scribble
#define CMD_REQUEST(Cmnd)	Cmnd->request->cmd
#define CMD_HOST(Cmnd)		Cmnd->device->host
#define SCSI_BUS_32(Cmnd)	Cmnd->device->channel
#define SCSI_TCN_32(Cmnd)	Cmnd->device->id
#define SCSI_LUN_32(Cmnd)	Cmnd->device->lun
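
/*
 * Illustrative check (not part of the driver): the overlay described above
 * is only safe while struct srb fits inside the scsi_pointer embedded in
 * each scsi_cmnd; a compile-time equivalent of the runtime check in
 * qla1280_init() would be
 *
 *	BUILD_BUG_ON(sizeof(struct srb) > sizeof(struct scsi_pointer));
 */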
/*****************************************/
/*   ISP Boards supported by this driver */
/*****************************************/
struct qla_boards {
	char *name;	/* Board ID String */
	int numPorts;	/* Number of SCSI ports */
	int fw_index;	/* index into qla1280_fw_tbl for firmware */
};

/* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
static struct pci_device_id qla1280_pci_tbl[] = {
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
	{0,}
};
MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);

DEFINE_MUTEX(qla1280_firmware_mutex);

struct qla_fw {
	char *fwname;
	const struct firmware *fw;
};

#define QL_NUM_FW_IMAGES 3

struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
	{"qlogic/1040.bin",  NULL},	/* image 0 */
	{"qlogic/1280.bin",  NULL},	/* image 1 */
	{"qlogic/12160.bin", NULL},	/* image 2 */
};
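
/*
 * Sketch only (assumed flow, not the driver's actual loader): how an entry
 * in qla1280_fw_tbl would typically be filled in, serialized by
 * qla1280_firmware_mutex so each image is requested once and then retained
 * for reuse during error recovery.
 */
static int __maybe_unused
qla1280_example_request_fw(struct scsi_qla_host *ha, int fw_index)
{
	struct qla_fw *qfw = &qla1280_fw_tbl[fw_index];
	int err = 0;

	mutex_lock(&qla1280_firmware_mutex);
	if (!qfw->fw)
		err = request_firmware(&qfw->fw, qfw->fwname, &ha->pdev->dev);
	mutex_unlock(&qla1280_firmware_mutex);

	return err;
}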
/* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
static struct qla_boards ql1280_board_tbl[] = {
	{.name = "QLA12160", .numPorts = 2, .fw_index = 2},
	{.name = "QLA1040",  .numPorts = 1, .fw_index = 0},
	{.name = "QLA1080",  .numPorts = 1, .fw_index = 1},
	{.name = "QLA1240",  .numPorts = 2, .fw_index = 1},
	{.name = "QLA1280",  .numPorts = 2, .fw_index = 1},
	{.name = "QLA10160", .numPorts = 1, .fw_index = 2},
	{.name = " ",        .numPorts = 0, .fw_index = -1},
};

static int qla1280_verbose = 1;

#if DEBUG_QLA1280
static int ql_debug_level = 1;
#define dprintk(level, format, a...)	\
	do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
#define qla1280_dump_buffer(level, buf, size)	\
	if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
#define qla1280_print_scsi_cmd(level, cmd)	\
	if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
#else
#define ql_debug_level	0
#define dprintk(level, format, a...)	do{}while(0)
#define qla1280_dump_buffer(a, b, c)	do{}while(0)
#define qla1280_print_scsi_cmd(a, b)	do{}while(0)
#endif

#define ENTER(x)	dprintk(3, "qla1280 : Entering %s()\n", x);
#define LEAVE(x)	dprintk(3, "qla1280 : Leaving %s()\n", x);
#define ENTER_INTR(x)	dprintk(4, "qla1280 : Entering %s()\n", x);
#define LEAVE_INTR(x)	dprintk(4, "qla1280 : Leaving %s()\n", x);
static int qla1280_read_nvram(struct scsi_qla_host *ha)
{
	uint16_t *wptr;
	uint8_t chksum;
	int cnt, i;
	struct nvram *nv;

	ENTER("qla1280_read_nvram");

	if (driver_setup.no_nvram)
		return 1;

	printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);

	wptr = (uint16_t *)&ha->nvram;
	nv = &ha->nvram;
	chksum = 0;
	for (cnt = 0; cnt < 3; cnt++) {
		*wptr = qla1280_get_nvram_word(ha, cnt);
		chksum += *wptr & 0xff;
		chksum += (*wptr >> 8) & 0xff;
		wptr++;
	}

	if (nv->id0 != 'I' || nv->id1 != 'S' ||
	    nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
		dprintk(2, "Invalid nvram ID or version!\n");
		chksum = 1;
	} else {
		for (; cnt < sizeof(struct nvram); cnt++) {
			*wptr = qla1280_get_nvram_word(ha, cnt);
			chksum += *wptr & 0xff;
			chksum += (*wptr >> 8) & 0xff;
			wptr++;
		}
	}

	dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
		" version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
		nv->version);

	if (chksum) {
		if (!driver_setup.no_nvram)
			printk(KERN_WARNING "scsi(%ld): Unable to identify or "
			       "validate NVRAM checksum, using default "
			       "settings\n", ha->host_no);
		ha->nvram_valid = 0;
	} else
		ha->nvram_valid = 1;

	/* The firmware interface is, um, interesting, in that the
	 * actual firmware image on the chip is little endian, thus,
	 * the process of taking that image to the CPU would end up
	 * little endian. However, the firmware interface requires it
	 * to be read a word (two bytes) at a time.
	 *
	 * The net result of this would be that the word (and
	 * doubleword) quantities in the firmware would be correct, but
	 * the bytes would be pairwise reversed. Since most of the
	 * firmware quantities are, in fact, bytes, we do an extra
	 * le16_to_cpu() in the firmware read routine.
	 *
	 * The upshot of all this is that the bytes in the firmware
	 * are in the correct places, but the 16 and 32 bit quantities
	 * are still in little endian format. We fix that up below by
	 * doing extra reverses on them */
	nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
	nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
	for (i = 0; i < MAX_BUSES; i++) {
		nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
		nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
	}

	dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
	LEAVE("qla1280_read_nvram");

	return chksum;
}
/**************************************************************************
 *   qla1280_info
 *     Return a string describing the driver.
 **************************************************************************/
static const char *
qla1280_info(struct Scsi_Host *host)
{
	static char qla1280_scsi_name_buffer[125];
	char *bp;
	struct scsi_qla_host *ha;
	struct qla_boards *bdp;

	bp = &qla1280_scsi_name_buffer[0];
	ha = (struct scsi_qla_host *)host->hostdata;
	bdp = &ql1280_board_tbl[ha->devnum];
	memset(bp, 0, sizeof(qla1280_scsi_name_buffer));

	sprintf(bp,
		"QLogic %s PCI to SCSI Host Adapter\n"
		" Firmware version: %2d.%02d.%02d, Driver version %s",
		&bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
		QLA1280_VERSION);
	return bp;
}
/**************************************************************************
 *   qla1280_queuecommand
 *     Queue a command to the controller.
 *
 * Note:
 * The mid-level driver tries to ensure that queuecommand never gets invoked
 * concurrently with itself or the interrupt handler (although the
 * interrupt handler may call this routine as part of request-completion
 * handling). Unfortunately, it sometimes calls the scheduler in interrupt
 * context which is a big NO! NO!.
 **************************************************************************/
static int
qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
	struct srb *sp = (struct srb *)CMD_SP(cmd);
	int status;

	cmd->scsi_done = fn;
	sp->cmd = cmd;
	sp->flags = 0;
	sp->wait = NULL;
	CMD_HANDLE(cmd) = (unsigned char *)NULL;

	qla1280_print_scsi_cmd(5, cmd);

#ifdef QLA_64BIT_PTR
	/*
	 * Using 64 bit commands if the PCI bridge doesn't support it is a
	 * bit wasteful, however this should really only happen if one's
	 * PCI controller is completely broken, like the BCM1250. For
	 * sane hardware this is not an issue.
	 */
	status = qla1280_64bit_start_scsi(ha, sp);
#else
	status = qla1280_32bit_start_scsi(ha, sp);
#endif
	return status;
}
static DEF_SCSI_QCMD(qla1280_queuecommand)

enum action {
	ABORT_COMMAND,
	DEVICE_RESET,
	BUS_RESET,
	ADAPTER_RESET,
};

static void qla1280_mailbox_timeout(unsigned long __data)
{
	struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
	struct device_reg __iomem *reg;

	reg = ha->iobase;

	ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
	printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
	       "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
	       RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
	complete(ha->mailbox_wait);
}

static int
_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
				 struct completion *wait)
{
	int status = FAILED;
	struct scsi_cmnd *cmd = sp->cmd;

	spin_unlock_irq(ha->host->host_lock);
	wait_for_completion_timeout(wait, 4*HZ);
	spin_lock_irq(ha->host->host_lock);
	sp->wait = NULL;
	if (CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
		status = SUCCESS;
		(*cmd->scsi_done)(cmd);
	}
	return status;
}

static int
qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	sp->wait = &wait;
	return _qla1280_wait_for_single_command(ha, sp, &wait);
}

static int
qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
{
	int cnt;
	int status;
	struct srb *sp;
	struct scsi_cmnd *cmd;

	status = SUCCESS;

	/*
	 * Wait for all commands with the designated bus/target
	 * to be completed by the firmware
	 */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;

			if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
				continue;
			if (target >= 0 && SCSI_TCN_32(cmd) != target)
				continue;

			status = qla1280_wait_for_single_command(ha, sp);
			if (status == FAILED)
				break;
		}
	}
	return status;
}
/**************************************************************************
 * qla1280_error_action
 *    The function will attempt to perform a specified error action and
 *    wait for the results (or time out).
 *
 * Input:
 *      cmd = Linux SCSI command packet of the command that caused the
 *            bus reset.
 *      action = error action to take (see enum action)
 *
 * Returns:
 *      SUCCESS or FAILED
 *
 **************************************************************************/
static int
qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
{
	struct scsi_qla_host *ha;
	int bus, target, lun;
	struct srb *sp;
	int i, found;
	int result = FAILED;
	int wait_for_bus = -1;
	int wait_for_target = -1;
	DECLARE_COMPLETION_ONSTACK(wait);

	ENTER("qla1280_error_action");

	ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
	sp = (struct srb *)CMD_SP(cmd);
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	dprintk(4, "error_action %i, istatus 0x%04x\n", action,
		RD_REG_WORD(&ha->iobase->istatus));

	dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
		RD_REG_WORD(&ha->iobase->host_cmd),
		RD_REG_WORD(&ha->iobase->ictrl), jiffies);

	if (qla1280_verbose)
		printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
		       "Handle=0x%p, action=0x%x\n",
		       ha->host_no, cmd, CMD_HANDLE(cmd), action);

	/*
	 * Check to see if we have the command in the outstanding_cmds[]
	 * array. If not then it must have completed before this error
	 * action was initiated. If the error_action isn't ABORT_COMMAND
	 * then the driver must proceed with the requested action.
	 */
	found = -1;
	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		if (sp == ha->outstanding_cmds[i]) {
			found = i;
			sp->wait = &wait; /* we'll wait for it to complete */
			break;
		}
	}

	if (found < 0) {	/* driver doesn't have command */
		result = SUCCESS;
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): specified command has "
			       "already completed.\n", ha->host_no, bus,
			       target, lun);
		}
	}

	switch (action) {

	case ABORT_COMMAND:
		dprintk(1, "qla1280: RISC aborting command\n");
		/*
		 * The abort might fail due to race when the host_lock
		 * is released to issue the abort. As such, we
		 * don't bother to check the return status.
		 */
		if (found >= 0)
			qla1280_abort_command(ha, sp, found);
		break;

	case DEVICE_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO
			       "scsi(%ld:%d:%d:%d): Queueing device reset "
			       "command.\n", ha->host_no, bus, target, lun);
		if (qla1280_device_reset(ha, bus, target) == 0) {
			/* issued device reset, set wait conditions */
			wait_for_bus = bus;
			wait_for_target = target;
		}
		break;

	case BUS_RESET:
		if (qla1280_verbose)
			printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
			       "reset.\n", ha->host_no, bus);
		if (qla1280_bus_reset(ha, bus) == 0) {
			/* issued bus reset, set wait conditions */
			wait_for_bus = bus;
		}
		break;

	case ADAPTER_RESET:
	default:
		if (qla1280_verbose) {
			printk(KERN_INFO
			       "scsi(%ld): Issued ADAPTER RESET\n",
			       ha->host_no);
			printk(KERN_INFO "scsi(%ld): I/O processing will "
			       "continue automatically\n", ha->host_no);
		}
		ha->flags.reset_active = 1;

		if (qla1280_abort_isp(ha) != 0) {	/* it's dead */
			result = FAILED;
		}

		ha->flags.reset_active = 0;
	}

	/*
	 * At this point, the host_lock has been released and retaken
	 * by the issuance of the mailbox command.
	 * Wait for the command passed in by the mid-layer if it
	 * was found by the driver. It might have been returned
	 * between eh recovery steps, hence the check of the "found"
	 * variable.
	 */
	if (found >= 0)
		result = _qla1280_wait_for_single_command(ha, sp, &wait);

	if (action == ABORT_COMMAND && result != SUCCESS) {
		printk(KERN_WARNING
		       "scsi(%li:%i:%i:%i): "
		       "Unable to abort command!\n",
		       ha->host_no, bus, target, lun);
	}

	/*
	 * If the command passed in by the mid-layer has been
	 * returned by the board, then wait for any additional
	 * commands which are supposed to complete based upon
	 * the error action.
	 *
	 * All commands are unconditionally returned during a
	 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
	 * to wait for them.
	 */
	if (result == SUCCESS && wait_for_bus >= 0) {
		result = qla1280_wait_for_pending_commands(ha,
			wait_for_bus, wait_for_target);
	}

	dprintk(1, "RESET returning %d\n", result);

	LEAVE("qla1280_error_action");
	return result;
}
  907. /**************************************************************************
  908. * qla1280_abort
  909. * Abort the specified SCSI command(s).
  910. **************************************************************************/
  911. static int
  912. qla1280_eh_abort(struct scsi_cmnd * cmd)
  913. {
  914. int rc;
  915. spin_lock_irq(cmd->device->host->host_lock);
  916. rc = qla1280_error_action(cmd, ABORT_COMMAND);
  917. spin_unlock_irq(cmd->device->host->host_lock);
  918. return rc;
  919. }
  920. /**************************************************************************
  921. * qla1280_device_reset
  922. * Reset the specified SCSI device
  923. **************************************************************************/
  924. static int
  925. qla1280_eh_device_reset(struct scsi_cmnd *cmd)
  926. {
  927. int rc;
  928. spin_lock_irq(cmd->device->host->host_lock);
  929. rc = qla1280_error_action(cmd, DEVICE_RESET);
  930. spin_unlock_irq(cmd->device->host->host_lock);
  931. return rc;
  932. }
  933. /**************************************************************************
  934. * qla1280_bus_reset
  935. * Reset the specified bus.
  936. **************************************************************************/
  937. static int
  938. qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
  939. {
  940. int rc;
  941. spin_lock_irq(cmd->device->host->host_lock);
  942. rc = qla1280_error_action(cmd, BUS_RESET);
  943. spin_unlock_irq(cmd->device->host->host_lock);
  944. return rc;
  945. }
  946. /**************************************************************************
  947. * qla1280_adapter_reset
  948. * Reset the specified adapter (both channels)
  949. **************************************************************************/
  950. static int
  951. qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
  952. {
  953. int rc;
  954. spin_lock_irq(cmd->device->host->host_lock);
  955. rc = qla1280_error_action(cmd, ADAPTER_RESET);
  956. spin_unlock_irq(cmd->device->host->host_lock);
  957. return rc;
  958. }
  959. static int
  960. qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
  961. sector_t capacity, int geom[])
  962. {
  963. int heads, sectors, cylinders;
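	/* Default BIOS translation is 64 heads x 32 sectors; if that would
	 * need more than 1024 cylinders, fall back to the extended
	 * 255 x 63 mapping below. */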
  964. heads = 64;
  965. sectors = 32;
  966. cylinders = (unsigned long)capacity / (heads * sectors);
  967. if (cylinders > 1024) {
  968. heads = 255;
  969. sectors = 63;
  970. cylinders = (unsigned long)capacity / (heads * sectors);
  971. /* if (cylinders > 1023)
  972. cylinders = 1023; */
  973. }
  974. geom[0] = heads;
  975. geom[1] = sectors;
  976. geom[2] = cylinders;
  977. return 0;
  978. }
  979. /* disable risc and host interrupts */
  980. static inline void
  981. qla1280_disable_intrs(struct scsi_qla_host *ha)
  982. {
  983. WRT_REG_WORD(&ha->iobase->ictrl, 0);
  984. RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
  985. }
  986. /* enable risc and host interrupts */
  987. static inline void
  988. qla1280_enable_intrs(struct scsi_qla_host *ha)
  989. {
  990. WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
  991. RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
  992. }
  993. /**************************************************************************
  994. * qla1280_intr_handler
  995. * Handles the H/W interrupt
  996. **************************************************************************/
  997. static irqreturn_t
  998. qla1280_intr_handler(int irq, void *dev_id)
  999. {
  1000. struct scsi_qla_host *ha;
  1001. struct device_reg __iomem *reg;
  1002. u16 data;
  1003. int handled = 0;
  1004. ENTER_INTR ("qla1280_intr_handler");
  1005. ha = (struct scsi_qla_host *)dev_id;
  1006. spin_lock(ha->host->host_lock);
  1007. ha->isr_count++;
  1008. reg = ha->iobase;
  1009. qla1280_disable_intrs(ha);
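	/* With board interrupts masked, sample the interrupt status and
	 * service any pending RISC interrupt; completions are queued on
	 * done_q and flushed below before interrupts are re-enabled. */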
  1010. data = qla1280_debounce_register(&reg->istatus);
  1011. /* Check for pending interrupts. */
  1012. if (data & RISC_INT) {
  1013. qla1280_isr(ha, &ha->done_q);
  1014. handled = 1;
  1015. }
  1016. if (!list_empty(&ha->done_q))
  1017. qla1280_done(ha);
  1018. spin_unlock(ha->host->host_lock);
  1019. qla1280_enable_intrs(ha);
  1020. LEAVE_INTR("qla1280_intr_handler");
  1021. return IRQ_RETVAL(handled);
  1022. }
  1023. static int
  1024. qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
  1025. {
  1026. uint8_t mr;
  1027. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1028. struct nvram *nv;
  1029. int status, lun;
  1030. nv = &ha->nvram;
  1031. mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
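	/* mr selects which outgoing mailbox registers are loaded (mb[0]-mb[3]
	 * here); BIT_6 is added below when mb[6] carries the 1x160 PPR bits. */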
  1032. /* Set Target Parameters. */
  1033. mb[0] = MBC_SET_TARGET_PARAMETERS;
  1034. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1035. mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
  1036. mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
  1037. mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
  1038. mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
  1039. mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
  1040. mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
  1041. mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
  1042. mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
  1043. if (IS_ISP1x160(ha)) {
  1044. mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
  1045. mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
  1046. mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
  1047. nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
  1048. mr |= BIT_6;
  1049. } else {
  1050. mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
  1051. }
  1052. mb[3] |= nv->bus[bus].target[target].sync_period;
  1053. status = qla1280_mailbox_command(ha, mr, mb);
  1054. /* Set Device Queue Parameters. */
  1055. for (lun = 0; lun < MAX_LUNS; lun++) {
  1056. mb[0] = MBC_SET_DEVICE_QUEUE;
  1057. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1058. mb[1] |= lun;
  1059. mb[2] = nv->bus[bus].max_queue_depth;
  1060. mb[3] = nv->bus[bus].target[target].execution_throttle;
  1061. status |= qla1280_mailbox_command(ha, 0x0f, mb);
  1062. }
  1063. if (status)
  1064. printk(KERN_WARNING "scsi(%ld:%i:%i): "
  1065. "qla1280_set_target_parameters() failed\n",
  1066. ha->host_no, bus, target);
  1067. return status;
  1068. }
  1069. /**************************************************************************
  1070. * qla1280_slave_configure
  1071. *
  1072. * Description:
 *	Determines the queue depth for a given device.  There are two ways
 *	a queue depth can be obtained for a tagged queueing device.  One
 *	way is a driver default queue depth; if one is defined, it is used
 *	as the default queue depth.  Otherwise, we use either 4 or 8 as the
 *	default queue depth (dependent on the number of hardware SCBs).
  1079. **************************************************************************/
  1080. static int
  1081. qla1280_slave_configure(struct scsi_device *device)
  1082. {
  1083. struct scsi_qla_host *ha;
  1084. int default_depth = 3;
  1085. int bus = device->channel;
  1086. int target = device->id;
  1087. int status = 0;
  1088. struct nvram *nv;
  1089. unsigned long flags;
  1090. ha = (struct scsi_qla_host *)device->host->hostdata;
  1091. nv = &ha->nvram;
  1092. if (qla1280_check_for_dead_scsi_bus(ha, bus))
  1093. return 1;
  1094. if (device->tagged_supported &&
  1095. (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
  1096. scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
  1097. ha->bus_settings[bus].hiwat);
  1098. } else {
  1099. scsi_adjust_queue_depth(device, 0, default_depth);
  1100. }
  1101. nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
  1102. nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
  1103. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
  1104. if (driver_setup.no_sync ||
  1105. (driver_setup.sync_mask &&
  1106. (~driver_setup.sync_mask & (1 << target))))
  1107. nv->bus[bus].target[target].parameter.enable_sync = 0;
  1108. if (driver_setup.no_wide ||
  1109. (driver_setup.wide_mask &&
  1110. (~driver_setup.wide_mask & (1 << target))))
  1111. nv->bus[bus].target[target].parameter.enable_wide = 0;
  1112. if (IS_ISP1x160(ha)) {
  1113. if (driver_setup.no_ppr ||
  1114. (driver_setup.ppr_mask &&
  1115. (~driver_setup.ppr_mask & (1 << target))))
  1116. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
  1117. }
  1118. spin_lock_irqsave(ha->host->host_lock, flags);
  1119. if (nv->bus[bus].target[target].parameter.enable_sync)
  1120. status = qla1280_set_target_parameters(ha, bus, target);
  1121. qla1280_get_target_parameters(ha, device);
  1122. spin_unlock_irqrestore(ha->host->host_lock, flags);
  1123. return status;
  1124. }
  1125. /*
  1126. * qla1280_done
  1127. * Process completed commands.
  1128. *
  1129. * Input:
  1130. * ha = adapter block pointer.
  1131. */
  1132. static void
  1133. qla1280_done(struct scsi_qla_host *ha)
  1134. {
  1135. struct srb *sp;
  1136. struct list_head *done_q;
  1137. int bus, target, lun;
  1138. struct scsi_cmnd *cmd;
  1139. ENTER("qla1280_done");
  1140. done_q = &ha->done_q;
  1141. while (!list_empty(done_q)) {
  1142. sp = list_entry(done_q->next, struct srb, list);
  1143. list_del(&sp->list);
  1144. cmd = sp->cmd;
  1145. bus = SCSI_BUS_32(cmd);
  1146. target = SCSI_TCN_32(cmd);
  1147. lun = SCSI_LUN_32(cmd);
  1148. switch ((CMD_RESULT(cmd) >> 16)) {
  1149. case DID_RESET:
  1150. /* Issue marker command. */
  1151. if (!ha->flags.abort_isp_active)
  1152. qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
  1153. break;
  1154. case DID_ABORT:
  1155. sp->flags &= ~SRB_ABORT_PENDING;
  1156. sp->flags |= SRB_ABORTED;
  1157. break;
  1158. default:
  1159. break;
  1160. }
  1161. /* Release memory used for this I/O */
  1162. scsi_dma_unmap(cmd);
  1163. /* Call the mid-level driver interrupt handler */
  1164. ha->actthreads--;
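		/* A command the error handler is waiting on completes the
		 * waiter instead of being handed back to scsi_done() here. */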
  1165. if (sp->wait == NULL)
  1166. (*(cmd)->scsi_done)(cmd);
  1167. else
  1168. complete(sp->wait);
  1169. }
  1170. LEAVE("qla1280_done");
  1171. }
  1172. /*
  1173. * Translates a ISP error to a Linux SCSI error
  1174. */
  1175. static int
  1176. qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
  1177. {
  1178. int host_status = DID_ERROR;
  1179. uint16_t comp_status = le16_to_cpu(sts->comp_status);
  1180. uint16_t state_flags = le16_to_cpu(sts->state_flags);
  1181. uint32_t residual_length = le32_to_cpu(sts->residual_length);
  1182. uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
  1183. #if DEBUG_QLA1280_INTR
  1184. static char *reason[] = {
  1185. "DID_OK",
  1186. "DID_NO_CONNECT",
  1187. "DID_BUS_BUSY",
  1188. "DID_TIME_OUT",
  1189. "DID_BAD_TARGET",
  1190. "DID_ABORT",
  1191. "DID_PARITY",
  1192. "DID_ERROR",
  1193. "DID_RESET",
  1194. "DID_BAD_INTR"
  1195. };
  1196. #endif /* DEBUG_QLA1280_INTR */
  1197. ENTER("qla1280_return_status");
  1198. #if DEBUG_QLA1280_INTR
  1199. /*
  1200. dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
  1201. comp_status);
  1202. */
  1203. #endif
  1204. switch (comp_status) {
  1205. case CS_COMPLETE:
  1206. host_status = DID_OK;
  1207. break;
  1208. case CS_INCOMPLETE:
  1209. if (!(state_flags & SF_GOT_BUS))
  1210. host_status = DID_NO_CONNECT;
  1211. else if (!(state_flags & SF_GOT_TARGET))
  1212. host_status = DID_BAD_TARGET;
  1213. else if (!(state_flags & SF_SENT_CDB))
  1214. host_status = DID_ERROR;
  1215. else if (!(state_flags & SF_TRANSFERRED_DATA))
  1216. host_status = DID_ERROR;
  1217. else if (!(state_flags & SF_GOT_STATUS))
  1218. host_status = DID_ERROR;
  1219. else if (!(state_flags & SF_GOT_SENSE))
  1220. host_status = DID_ERROR;
  1221. break;
  1222. case CS_RESET:
  1223. host_status = DID_RESET;
  1224. break;
  1225. case CS_ABORTED:
  1226. host_status = DID_ABORT;
  1227. break;
  1228. case CS_TIMEOUT:
  1229. host_status = DID_TIME_OUT;
  1230. break;
  1231. case CS_DATA_OVERRUN:
  1232. dprintk(2, "Data overrun 0x%x\n", residual_length);
  1233. dprintk(2, "qla1280_return_status: response packet data\n");
  1234. qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
  1235. host_status = DID_ERROR;
  1236. break;
  1237. case CS_DATA_UNDERRUN:
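		/* An underrun only counts as an error when fewer bytes than
		 * the midlayer's declared underflow limit were transferred;
		 * otherwise report the residual and complete normally. */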
  1238. if ((scsi_bufflen(cp) - residual_length) <
  1239. cp->underflow) {
  1240. printk(KERN_WARNING
  1241. "scsi: Underflow detected - retrying "
  1242. "command.\n");
  1243. host_status = DID_ERROR;
  1244. } else {
  1245. scsi_set_resid(cp, residual_length);
  1246. host_status = DID_OK;
  1247. }
  1248. break;
  1249. default:
  1250. host_status = DID_ERROR;
  1251. break;
  1252. }
  1253. #if DEBUG_QLA1280_INTR
  1254. dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
  1255. reason[host_status], scsi_status);
  1256. #endif
  1257. LEAVE("qla1280_return_status");
  1258. return (scsi_status & 0xff) | (host_status << 16);
  1259. }
  1260. /****************************************************************************/
  1261. /* QLogic ISP1280 Hardware Support Functions. */
  1262. /****************************************************************************/
  1263. /*
  1264. * qla1280_initialize_adapter
  1265. * Initialize board.
  1266. *
  1267. * Input:
  1268. * ha = adapter block pointer.
  1269. *
  1270. * Returns:
  1271. * 0 = success
  1272. */
  1273. static int __devinit
  1274. qla1280_initialize_adapter(struct scsi_qla_host *ha)
  1275. {
  1276. struct device_reg __iomem *reg;
  1277. int status;
  1278. int bus;
  1279. unsigned long flags;
  1280. ENTER("qla1280_initialize_adapter");
  1281. /* Clear adapter flags. */
  1282. ha->flags.online = 0;
  1283. ha->flags.disable_host_adapter = 0;
  1284. ha->flags.reset_active = 0;
  1285. ha->flags.abort_isp_active = 0;
  1286. #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
  1287. if (ia64_platform_is("sn2")) {
  1288. printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
  1289. "dual channel lockup workaround\n", ha->host_no);
  1290. ha->flags.use_pci_vchannel = 1;
  1291. driver_setup.no_nvram = 1;
  1292. }
  1293. #endif
  1294. /* TODO: implement support for the 1040 nvram format */
  1295. if (IS_ISP1040(ha))
  1296. driver_setup.no_nvram = 1;
  1297. dprintk(1, "Configure PCI space for adapter...\n");
  1298. reg = ha->iobase;
	/* Ensure mailbox registers are free. */
  1300. WRT_REG_WORD(&reg->semaphore, 0);
  1301. WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
  1302. WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
  1303. RD_REG_WORD(&reg->host_cmd);
  1304. if (qla1280_read_nvram(ha)) {
  1305. dprintk(2, "qla1280_initialize_adapter: failed to read "
  1306. "NVRAM\n");
  1307. }
  1308. /*
  1309. * It's necessary to grab the spin here as qla1280_mailbox_command
  1310. * needs to be able to drop the lock unconditionally to wait
  1311. * for completion.
  1312. */
  1313. spin_lock_irqsave(ha->host->host_lock, flags);
  1314. status = qla1280_load_firmware(ha);
  1315. if (status) {
  1316. printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
  1317. ha->host_no);
  1318. goto out;
  1319. }
  1320. /* Setup adapter based on NVRAM parameters. */
  1321. dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
  1322. qla1280_nvram_config(ha);
  1323. if (ha->flags.disable_host_adapter) {
  1324. status = 1;
  1325. goto out;
  1326. }
  1327. status = qla1280_init_rings(ha);
  1328. if (status)
  1329. goto out;
  1330. /* Issue SCSI reset, if we can't reset twice then bus is dead */
  1331. for (bus = 0; bus < ha->ports; bus++) {
  1332. if (!ha->bus_settings[bus].disable_scsi_reset &&
  1333. qla1280_bus_reset(ha, bus) &&
  1334. qla1280_bus_reset(ha, bus))
  1335. ha->bus_settings[bus].scsi_bus_dead = 1;
  1336. }
  1337. ha->flags.online = 1;
  1338. out:
  1339. spin_unlock_irqrestore(ha->host->host_lock, flags);
  1340. if (status)
  1341. dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
  1342. LEAVE("qla1280_initialize_adapter");
  1343. return status;
  1344. }
  1345. /*
  1346. * qla1280_request_firmware
  1347. * Acquire firmware for chip. Retain in memory
  1348. * for error recovery.
  1349. *
  1350. * Input:
  1351. * ha = adapter block pointer.
  1352. *
  1353. * Returns:
  1354. * Pointer to firmware image or an error code
  1355. * cast to pointer via ERR_PTR().
  1356. */
  1357. static const struct firmware *
  1358. qla1280_request_firmware(struct scsi_qla_host *ha)
  1359. {
  1360. const struct firmware *fw;
  1361. int err;
  1362. int index;
  1363. char *fwname;
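	/* request_firmware() may sleep, so drop the host lock around it;
	 * qla1280_firmware_mutex serialises access to the shared fw table. */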
  1364. spin_unlock_irq(ha->host->host_lock);
  1365. mutex_lock(&qla1280_firmware_mutex);
  1366. index = ql1280_board_tbl[ha->devnum].fw_index;
  1367. fw = qla1280_fw_tbl[index].fw;
  1368. if (fw)
  1369. goto out;
  1370. fwname = qla1280_fw_tbl[index].fwname;
  1371. err = request_firmware(&fw, fwname, &ha->pdev->dev);
  1372. if (err) {
  1373. printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
  1374. fwname, err);
  1375. fw = ERR_PTR(err);
  1376. goto unlock;
  1377. }
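	/* Sanity check: the image is consumed as 16-bit words and must at
	 * least contain the header words (version bytes and the RISC load
	 * address) that are read below. */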
  1378. if ((fw->size % 2) || (fw->size < 6)) {
  1379. printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
  1380. fw->size, fwname);
  1381. release_firmware(fw);
  1382. fw = ERR_PTR(-EINVAL);
  1383. goto unlock;
  1384. }
  1385. qla1280_fw_tbl[index].fw = fw;
  1386. out:
  1387. ha->fwver1 = fw->data[0];
  1388. ha->fwver2 = fw->data[1];
  1389. ha->fwver3 = fw->data[2];
  1390. unlock:
  1391. mutex_unlock(&qla1280_firmware_mutex);
  1392. spin_lock_irq(ha->host->host_lock);
  1393. return fw;
  1394. }
  1395. /*
  1396. * Chip diagnostics
  1397. * Test chip for proper operation.
  1398. *
  1399. * Input:
  1400. * ha = adapter block pointer.
  1401. *
  1402. * Returns:
  1403. * 0 = success.
  1404. */
  1405. static int
  1406. qla1280_chip_diag(struct scsi_qla_host *ha)
  1407. {
  1408. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1409. struct device_reg __iomem *reg = ha->iobase;
  1410. int status = 0;
  1411. int cnt;
  1412. uint16_t data;
  1413. dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
  1414. dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
  1415. /* Soft reset chip and wait for it to finish. */
  1416. WRT_REG_WORD(&reg->ictrl, ISP_RESET);
  1417. /*
  1418. * We can't do a traditional PCI write flush here by reading
  1419. * back the register. The card will not respond once the reset
  1420. * is in action and we end up with a machine check exception
  1421. * instead. Nothing to do but wait and hope for the best.
  1422. * A portable pci_write_flush(pdev) call would be very useful here.
  1423. */
  1424. udelay(20);
  1425. data = qla1280_debounce_register(&reg->ictrl);
  1426. /*
  1427. * Yet another QLogic gem ;-(
  1428. */
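	/* Poll for up to ~5 seconds (1,000,000 iterations x 5us) for the
	 * reset bit to self-clear. */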
  1429. for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
  1430. udelay(5);
  1431. data = RD_REG_WORD(&reg->ictrl);
  1432. }
  1433. if (!cnt)
  1434. goto fail;
  1435. /* Reset register cleared by chip reset. */
  1436. dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
  1437. WRT_REG_WORD(&reg->cfg_1, 0);
  1438. /* Reset RISC and disable BIOS which
  1439. allows RISC to execute out of RAM. */
  1440. WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
  1441. HC_RELEASE_RISC | HC_DISABLE_BIOS);
  1442. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1443. data = qla1280_debounce_register(&reg->mailbox0);
  1444. /*
  1445. * I *LOVE* this code!
  1446. */
  1447. for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
  1448. udelay(5);
  1449. data = RD_REG_WORD(&reg->mailbox0);
  1450. }
  1451. if (!cnt)
  1452. goto fail;
  1453. /* Check product ID of chip */
  1454. dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
  1455. if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
  1456. (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
  1457. RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
  1458. RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
  1459. RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
  1460. printk(KERN_INFO "qla1280: Wrong product ID = "
  1461. "0x%x,0x%x,0x%x,0x%x\n",
  1462. RD_REG_WORD(&reg->mailbox1),
  1463. RD_REG_WORD(&reg->mailbox2),
  1464. RD_REG_WORD(&reg->mailbox3),
  1465. RD_REG_WORD(&reg->mailbox4));
  1466. goto fail;
  1467. }
  1468. /*
  1469. * Enable ints early!!!
  1470. */
  1471. qla1280_enable_intrs(ha);
  1472. dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
  1473. /* Wrap Incoming Mailboxes Test. */
  1474. mb[0] = MBC_MAILBOX_REGISTER_TEST;
  1475. mb[1] = 0xAAAA;
  1476. mb[2] = 0x5555;
  1477. mb[3] = 0xAA55;
  1478. mb[4] = 0x55AA;
  1479. mb[5] = 0xA5A5;
  1480. mb[6] = 0x5A5A;
  1481. mb[7] = 0x2525;
  1482. status = qla1280_mailbox_command(ha, 0xff, mb);
  1483. if (status)
  1484. goto fail;
  1485. if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
  1486. mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
  1487. mb[7] != 0x2525) {
  1488. printk(KERN_INFO "qla1280: Failed mbox check\n");
  1489. goto fail;
  1490. }
  1491. dprintk(3, "qla1280_chip_diag: exiting normally\n");
  1492. return 0;
  1493. fail:
  1494. dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
  1495. return status;
  1496. }
  1497. static int
  1498. qla1280_load_firmware_pio(struct scsi_qla_host *ha)
  1499. {
  1500. /* enter with host_lock acquired */
  1501. const struct firmware *fw;
  1502. const __le16 *fw_data;
  1503. uint16_t risc_address, risc_code_size;
  1504. uint16_t mb[MAILBOX_REGISTER_COUNT], i;
  1505. int err = 0;
  1506. fw = qla1280_request_firmware(ha);
  1507. if (IS_ERR(fw))
  1508. return PTR_ERR(fw);
  1509. fw_data = (const __le16 *)&fw->data[0];
  1510. ha->fwstart = __le16_to_cpu(fw_data[2]);
  1511. /* Load RISC code. */
  1512. risc_address = ha->fwstart;
  1513. fw_data = (const __le16 *)&fw->data[6];
  1514. risc_code_size = (fw->size - 6) / 2;
  1515. for (i = 0; i < risc_code_size; i++) {
  1516. mb[0] = MBC_WRITE_RAM_WORD;
  1517. mb[1] = risc_address + i;
  1518. mb[2] = __le16_to_cpu(fw_data[i]);
  1519. err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
  1520. if (err) {
  1521. printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
  1522. ha->host_no);
  1523. break;
  1524. }
  1525. }
  1526. return err;
  1527. }
  1528. #define DUMP_IT_BACK 0 /* for debug of RISC loading */
  1529. static int
  1530. qla1280_load_firmware_dma(struct scsi_qla_host *ha)
  1531. {
  1532. /* enter with host_lock acquired */
  1533. const struct firmware *fw;
  1534. const __le16 *fw_data;
  1535. uint16_t risc_address, risc_code_size;
  1536. uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
  1537. int err = 0, num, i;
  1538. #if DUMP_IT_BACK
  1539. uint8_t *sp, *tbuf;
  1540. dma_addr_t p_tbuf;
  1541. tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
  1542. if (!tbuf)
  1543. return -ENOMEM;
  1544. #endif
  1545. fw = qla1280_request_firmware(ha);
  1546. if (IS_ERR(fw))
  1547. return PTR_ERR(fw);
  1548. fw_data = (const __le16 *)&fw->data[0];
  1549. ha->fwstart = __le16_to_cpu(fw_data[2]);
  1550. /* Load RISC code. */
  1551. risc_address = ha->fwstart;
  1552. fw_data = (const __le16 *)&fw->data[6];
  1553. risc_code_size = (fw->size - 6) / 2;
  1554. dprintk(1, "%s: DMA RISC code (%i) words\n",
  1555. __func__, risc_code_size);
  1556. num = 0;
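	/* Transfer the image in chunks of at most 1000 words, staging each
	 * chunk in the request ring buffer and letting the RISC DMA it into
	 * its own RAM via MBC_LOAD_RAM. */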
  1557. while (risc_code_size > 0) {
  1558. int warn __attribute__((unused)) = 0;
  1559. cnt = 2000 >> 1;
  1560. if (cnt > risc_code_size)
  1561. cnt = risc_code_size;
  1562. dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
  1563. "%d,%d(0x%x)\n",
  1564. fw_data, cnt, num, risc_address);
  1565. for(i = 0; i < cnt; i++)
  1566. ((__le16 *)ha->request_ring)[i] = fw_data[i];
  1567. mb[0] = MBC_LOAD_RAM;
  1568. mb[1] = risc_address;
  1569. mb[4] = cnt;
  1570. mb[3] = ha->request_dma & 0xffff;
  1571. mb[2] = (ha->request_dma >> 16) & 0xffff;
  1572. mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
  1573. mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
  1574. dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
  1575. __func__, mb[0],
  1576. (void *)(long)ha->request_dma,
  1577. mb[6], mb[7], mb[2], mb[3]);
  1578. err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
  1579. BIT_1 | BIT_0, mb);
  1580. if (err) {
			printk(KERN_ERR "scsi(%li): Failed to load partial "
			       "segment of f/w\n", ha->host_no);
  1583. goto out;
  1584. }
  1585. #if DUMP_IT_BACK
  1586. mb[0] = MBC_DUMP_RAM;
  1587. mb[1] = risc_address;
  1588. mb[4] = cnt;
  1589. mb[3] = p_tbuf & 0xffff;
  1590. mb[2] = (p_tbuf >> 16) & 0xffff;
  1591. mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
  1592. mb[6] = pci_dma_hi32(p_tbuf) >> 16;
  1593. err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
  1594. BIT_1 | BIT_0, mb);
  1595. if (err) {
  1596. printk(KERN_ERR
  1597. "Failed to dump partial segment of f/w\n");
  1598. goto out;
  1599. }
  1600. sp = (uint8_t *)ha->request_ring;
  1601. for (i = 0; i < (cnt << 1); i++) {
  1602. if (tbuf[i] != sp[i] && warn++ < 10) {
  1603. printk(KERN_ERR "%s: FW compare error @ "
  1604. "byte(0x%x) loop#=%x\n",
  1605. __func__, i, num);
  1606. printk(KERN_ERR "%s: FWbyte=%x "
  1607. "FWfromChip=%x\n",
  1608. __func__, sp[i], tbuf[i]);
  1609. /*break; */
  1610. }
  1611. }
  1612. #endif
  1613. risc_address += cnt;
  1614. risc_code_size = risc_code_size - cnt;
  1615. fw_data = fw_data + cnt;
  1616. num++;
  1617. }
  1618. out:
  1619. #if DUMP_IT_BACK
  1620. pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
  1621. #endif
  1622. return err;
  1623. }
  1624. static int
  1625. qla1280_start_firmware(struct scsi_qla_host *ha)
  1626. {
  1627. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1628. int err;
  1629. dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
  1630. __func__);
  1631. /* Verify checksum of loaded RISC code. */
  1632. mb[0] = MBC_VERIFY_CHECKSUM;
  1633. /* mb[1] = ql12_risc_code_addr01; */
  1634. mb[1] = ha->fwstart;
  1635. err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1636. if (err) {
  1637. printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
  1638. return err;
  1639. }
  1640. /* Start firmware execution. */
  1641. dprintk(1, "%s: start firmware running.\n", __func__);
  1642. mb[0] = MBC_EXECUTE_FIRMWARE;
  1643. mb[1] = ha->fwstart;
  1644. err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1645. if (err) {
  1646. printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
  1647. ha->host_no);
  1648. }
  1649. return err;
  1650. }
  1651. static int
  1652. qla1280_load_firmware(struct scsi_qla_host *ha)
  1653. {
  1654. /* enter with host_lock taken */
  1655. int err;
  1656. err = qla1280_chip_diag(ha);
  1657. if (err)
  1658. goto out;
  1659. if (IS_ISP1040(ha))
  1660. err = qla1280_load_firmware_pio(ha);
  1661. else
  1662. err = qla1280_load_firmware_dma(ha);
  1663. if (err)
  1664. goto out;
  1665. err = qla1280_start_firmware(ha);
  1666. out:
  1667. return err;
  1668. }
  1669. /*
  1670. * Initialize rings
  1671. *
  1672. * Input:
  1673. * ha = adapter block pointer.
  1674. * ha->request_ring = request ring virtual address
  1675. * ha->response_ring = response ring virtual address
  1676. * ha->request_dma = request ring physical address
  1677. * ha->response_dma = response ring physical address
  1678. *
  1679. * Returns:
  1680. * 0 = success.
  1681. */
  1682. static int
  1683. qla1280_init_rings(struct scsi_qla_host *ha)
  1684. {
  1685. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1686. int status = 0;
  1687. ENTER("qla1280_init_rings");
  1688. /* Clear outstanding commands array. */
  1689. memset(ha->outstanding_cmds, 0,
  1690. sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
  1691. /* Initialize request queue. */
  1692. ha->request_ring_ptr = ha->request_ring;
  1693. ha->req_ring_index = 0;
  1694. ha->req_q_cnt = REQUEST_ENTRY_CNT;
  1695. /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
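	/* Use the A64 variant so the upper 32 bits of the ring's DMA
	 * address can be passed in mb[7]/mb[6]. */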
  1696. mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
  1697. mb[1] = REQUEST_ENTRY_CNT;
  1698. mb[3] = ha->request_dma & 0xffff;
  1699. mb[2] = (ha->request_dma >> 16) & 0xffff;
  1700. mb[4] = 0;
  1701. mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
  1702. mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
  1703. if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
  1704. BIT_3 | BIT_2 | BIT_1 | BIT_0,
  1705. &mb[0]))) {
  1706. /* Initialize response queue. */
  1707. ha->response_ring_ptr = ha->response_ring;
  1708. ha->rsp_ring_index = 0;
  1709. /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
  1710. mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
  1711. mb[1] = RESPONSE_ENTRY_CNT;
  1712. mb[3] = ha->response_dma & 0xffff;
  1713. mb[2] = (ha->response_dma >> 16) & 0xffff;
  1714. mb[5] = 0;
  1715. mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
  1716. mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
  1717. status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
  1718. BIT_3 | BIT_2 | BIT_1 | BIT_0,
  1719. &mb[0]);
  1720. }
  1721. if (status)
  1722. dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
  1723. LEAVE("qla1280_init_rings");
  1724. return status;
  1725. }
  1726. static void
  1727. qla1280_print_settings(struct nvram *nv)
  1728. {
  1729. dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
  1730. nv->bus[0].config_1.initiator_id);
  1731. dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
  1732. nv->bus[1].config_1.initiator_id);
  1733. dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
  1734. nv->bus[0].bus_reset_delay);
  1735. dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
  1736. nv->bus[1].bus_reset_delay);
  1737. dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
  1738. dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
  1739. dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
  1740. dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
  1741. dprintk(1, "qla1280 : async data setup time[0]=%d\n",
  1742. nv->bus[0].config_2.async_data_setup_time);
  1743. dprintk(1, "qla1280 : async data setup time[1]=%d\n",
  1744. nv->bus[1].config_2.async_data_setup_time);
  1745. dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
  1746. nv->bus[0].config_2.req_ack_active_negation);
  1747. dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
  1748. nv->bus[1].config_2.req_ack_active_negation);
  1749. dprintk(1, "qla1280 : data line active negation[0]=%d\n",
  1750. nv->bus[0].config_2.data_line_active_negation);
  1751. dprintk(1, "qla1280 : data line active negation[1]=%d\n",
  1752. nv->bus[1].config_2.data_line_active_negation);
  1753. dprintk(1, "qla1280 : disable loading risc code=%d\n",
  1754. nv->cntr_flags_1.disable_loading_risc_code);
  1755. dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
  1756. nv->cntr_flags_1.enable_64bit_addressing);
  1757. dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
  1758. nv->bus[0].selection_timeout);
  1759. dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
  1760. nv->bus[1].selection_timeout);
  1761. dprintk(1, "qla1280 : max queue depth[0]=%d\n",
  1762. nv->bus[0].max_queue_depth);
  1763. dprintk(1, "qla1280 : max queue depth[1]=%d\n",
  1764. nv->bus[1].max_queue_depth);
  1765. }
  1766. static void
  1767. qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
  1768. {
  1769. struct nvram *nv = &ha->nvram;
  1770. nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
  1771. nv->bus[bus].target[target].parameter.auto_request_sense = 1;
  1772. nv->bus[bus].target[target].parameter.tag_queuing = 1;
  1773. nv->bus[bus].target[target].parameter.enable_sync = 1;
  1774. #if 1 /* Some SCSI Processors do not seem to like this */
  1775. nv->bus[bus].target[target].parameter.enable_wide = 1;
  1776. #endif
  1777. nv->bus[bus].target[target].execution_throttle =
  1778. nv->bus[bus].max_queue_depth - 1;
  1779. nv->bus[bus].target[target].parameter.parity_checking = 1;
  1780. nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
  1781. if (IS_ISP1x160(ha)) {
  1782. nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
  1783. nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
  1784. nv->bus[bus].target[target].sync_period = 9;
  1785. nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
  1786. nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
  1787. nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
  1788. } else {
  1789. nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
  1790. nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
  1791. nv->bus[bus].target[target].sync_period = 10;
  1792. }
  1793. }
  1794. static void
  1795. qla1280_set_defaults(struct scsi_qla_host *ha)
  1796. {
  1797. struct nvram *nv = &ha->nvram;
  1798. int bus, target;
  1799. dprintk(1, "Using defaults for NVRAM: \n");
  1800. memset(nv, 0, sizeof(struct nvram));
  1801. /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
  1802. nv->firmware_feature.f.enable_fast_posting = 1;
  1803. nv->firmware_feature.f.disable_synchronous_backoff = 1;
  1804. nv->termination.scsi_bus_0_control = 3;
  1805. nv->termination.scsi_bus_1_control = 3;
  1806. nv->termination.auto_term_support = 1;
  1807. /*
  1808. * Set default FIFO magic - What appropriate values would be here
  1809. * is unknown. This is what I have found testing with 12160s.
  1810. *
  1811. * Now, I would love the magic decoder ring for this one, the
  1812. * header file provided by QLogic seems to be bogus or incomplete
  1813. * at best.
  1814. */
  1815. nv->isp_config.burst_enable = 1;
  1816. if (IS_ISP1040(ha))
  1817. nv->isp_config.fifo_threshold |= 3;
  1818. else
  1819. nv->isp_config.fifo_threshold |= 4;
  1820. if (IS_ISP1x160(ha))
  1821. nv->isp_parameter = 0x01; /* fast memory enable */
  1822. for (bus = 0; bus < MAX_BUSES; bus++) {
  1823. nv->bus[bus].config_1.initiator_id = 7;
  1824. nv->bus[bus].config_2.req_ack_active_negation = 1;
  1825. nv->bus[bus].config_2.data_line_active_negation = 1;
  1826. nv->bus[bus].selection_timeout = 250;
  1827. nv->bus[bus].max_queue_depth = 32;
  1828. if (IS_ISP1040(ha)) {
  1829. nv->bus[bus].bus_reset_delay = 3;
  1830. nv->bus[bus].config_2.async_data_setup_time = 6;
  1831. nv->bus[bus].retry_delay = 1;
  1832. } else {
  1833. nv->bus[bus].bus_reset_delay = 5;
  1834. nv->bus[bus].config_2.async_data_setup_time = 8;
  1835. }
  1836. for (target = 0; target < MAX_TARGETS; target++)
  1837. qla1280_set_target_defaults(ha, bus, target);
  1838. }
  1839. }
  1840. static int
  1841. qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
  1842. {
  1843. struct nvram *nv = &ha->nvram;
  1844. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1845. int status, lun;
  1846. uint16_t flag;
  1847. /* Set Target Parameters. */
  1848. mb[0] = MBC_SET_TARGET_PARAMETERS;
  1849. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1850. /*
  1851. * Do not enable sync and ppr for the initial INQUIRY run. We
  1852. * enable this later if we determine the target actually
  1853. * supports it.
  1854. */
  1855. mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
  1856. | TP_WIDE | TP_PARITY | TP_DISCONNECT);
  1857. if (IS_ISP1x160(ha))
  1858. mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
  1859. else
  1860. mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
  1861. mb[3] |= nv->bus[bus].target[target].sync_period;
  1862. status = qla1280_mailbox_command(ha, 0x0f, mb);
  1863. /* Save Tag queuing enable flag. */
  1864. flag = (BIT_0 << target);
  1865. if (nv->bus[bus].target[target].parameter.tag_queuing)
  1866. ha->bus_settings[bus].qtag_enables |= flag;
  1867. /* Save Device enable flag. */
  1868. if (IS_ISP1x160(ha)) {
  1869. if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
  1870. ha->bus_settings[bus].device_enables |= flag;
  1871. ha->bus_settings[bus].lun_disables |= 0;
  1872. } else {
  1873. if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
  1874. ha->bus_settings[bus].device_enables |= flag;
  1875. /* Save LUN disable flag. */
  1876. if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
  1877. ha->bus_settings[bus].lun_disables |= flag;
  1878. }
  1879. /* Set Device Queue Parameters. */
  1880. for (lun = 0; lun < MAX_LUNS; lun++) {
  1881. mb[0] = MBC_SET_DEVICE_QUEUE;
  1882. mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
  1883. mb[1] |= lun;
  1884. mb[2] = nv->bus[bus].max_queue_depth;
  1885. mb[3] = nv->bus[bus].target[target].execution_throttle;
  1886. status |= qla1280_mailbox_command(ha, 0x0f, mb);
  1887. }
  1888. return status;
  1889. }
  1890. static int
  1891. qla1280_config_bus(struct scsi_qla_host *ha, int bus)
  1892. {
  1893. struct nvram *nv = &ha->nvram;
  1894. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1895. int target, status;
  1896. /* SCSI Reset Disable. */
  1897. ha->bus_settings[bus].disable_scsi_reset =
  1898. nv->bus[bus].config_1.scsi_reset_disable;
  1899. /* Initiator ID. */
  1900. ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
  1901. mb[0] = MBC_SET_INITIATOR_ID;
  1902. mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
  1903. ha->bus_settings[bus].id;
  1904. status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1905. /* Reset Delay. */
  1906. ha->bus_settings[bus].bus_reset_delay =
  1907. nv->bus[bus].bus_reset_delay;
  1908. /* Command queue depth per device. */
  1909. ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
  1910. /* Set target parameters. */
  1911. for (target = 0; target < MAX_TARGETS; target++)
  1912. status |= qla1280_config_target(ha, bus, target);
  1913. return status;
  1914. }
  1915. static int
  1916. qla1280_nvram_config(struct scsi_qla_host *ha)
  1917. {
  1918. struct device_reg __iomem *reg = ha->iobase;
  1919. struct nvram *nv = &ha->nvram;
  1920. int bus, target, status = 0;
  1921. uint16_t mb[MAILBOX_REGISTER_COUNT];
  1922. ENTER("qla1280_nvram_config");
  1923. if (ha->nvram_valid) {
  1924. /* Always force AUTO sense for LINUX SCSI */
  1925. for (bus = 0; bus < MAX_BUSES; bus++)
  1926. for (target = 0; target < MAX_TARGETS; target++) {
  1927. nv->bus[bus].target[target].parameter.
  1928. auto_request_sense = 1;
  1929. }
  1930. } else {
  1931. qla1280_set_defaults(ha);
  1932. }
  1933. qla1280_print_settings(nv);
  1934. /* Disable RISC load of firmware. */
  1935. ha->flags.disable_risc_code_load =
  1936. nv->cntr_flags_1.disable_loading_risc_code;
  1937. if (IS_ISP1040(ha)) {
  1938. uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
  1939. hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
  1940. cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
  1941. cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
  1942. ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
  1943. /* Busted fifo, says mjacob. */
  1944. if (hwrev != ISP_CFG0_1040A)
  1945. cfg1 |= nv->isp_config.fifo_threshold << 4;
  1946. cfg1 |= nv->isp_config.burst_enable << 2;
  1947. WRT_REG_WORD(&reg->cfg_1, cfg1);
  1948. WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
		WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
  1950. } else {
  1951. uint16_t cfg1, term;
  1952. /* Set ISP hardware DMA burst */
  1953. cfg1 = nv->isp_config.fifo_threshold << 4;
  1954. cfg1 |= nv->isp_config.burst_enable << 2;
  1955. /* Enable DMA arbitration on dual channel controllers */
  1956. if (ha->ports > 1)
  1957. cfg1 |= BIT_13;
  1958. WRT_REG_WORD(&reg->cfg_1, cfg1);
  1959. /* Set SCSI termination. */
  1960. WRT_REG_WORD(&reg->gpio_enable,
  1961. BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
  1962. term = nv->termination.scsi_bus_1_control;
  1963. term |= nv->termination.scsi_bus_0_control << 2;
  1964. term |= nv->termination.auto_term_support << 7;
  1965. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1966. WRT_REG_WORD(&reg->gpio_data, term);
  1967. }
  1968. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  1969. /* ISP parameter word. */
  1970. mb[0] = MBC_SET_SYSTEM_PARAMETER;
  1971. mb[1] = nv->isp_parameter;
  1972. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
  1973. if (IS_ISP1x40(ha)) {
  1974. /* clock rate - for qla1240 and older, only */
  1975. mb[0] = MBC_SET_CLOCK_RATE;
  1976. mb[1] = 40;
  1977. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1978. }
  1979. /* Firmware feature word. */
  1980. mb[0] = MBC_SET_FIRMWARE_FEATURES;
  1981. mb[1] = nv->firmware_feature.f.enable_fast_posting;
  1982. mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
  1983. mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
  1984. #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
  1985. if (ia64_platform_is("sn2")) {
  1986. printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
  1987. "workaround\n", ha->host_no);
  1988. mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
  1989. }
  1990. #endif
  1991. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  1992. /* Retry count and delay. */
  1993. mb[0] = MBC_SET_RETRY_COUNT;
  1994. mb[1] = nv->bus[0].retry_count;
  1995. mb[2] = nv->bus[0].retry_delay;
  1996. mb[6] = nv->bus[1].retry_count;
  1997. mb[7] = nv->bus[1].retry_delay;
  1998. status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
  1999. BIT_1 | BIT_0, &mb[0]);
  2000. /* ASYNC data setup time. */
  2001. mb[0] = MBC_SET_ASYNC_DATA_SETUP;
  2002. mb[1] = nv->bus[0].config_2.async_data_setup_time;
  2003. mb[2] = nv->bus[1].config_2.async_data_setup_time;
  2004. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2005. /* Active negation states. */
  2006. mb[0] = MBC_SET_ACTIVE_NEGATION;
  2007. mb[1] = 0;
  2008. if (nv->bus[0].config_2.req_ack_active_negation)
  2009. mb[1] |= BIT_5;
  2010. if (nv->bus[0].config_2.data_line_active_negation)
  2011. mb[1] |= BIT_4;
  2012. mb[2] = 0;
  2013. if (nv->bus[1].config_2.req_ack_active_negation)
  2014. mb[2] |= BIT_5;
  2015. if (nv->bus[1].config_2.data_line_active_negation)
  2016. mb[2] |= BIT_4;
  2017. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  2018. mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
  2019. mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
  2020. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  2021. /* thingy */
  2022. mb[0] = MBC_SET_PCI_CONTROL;
  2023. mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
  2024. mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
  2025. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  2026. mb[0] = MBC_SET_TAG_AGE_LIMIT;
  2027. mb[1] = 8;
  2028. status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
  2029. /* Selection timeout. */
  2030. mb[0] = MBC_SET_SELECTION_TIMEOUT;
  2031. mb[1] = nv->bus[0].selection_timeout;
  2032. mb[2] = nv->bus[1].selection_timeout;
  2033. status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
  2034. for (bus = 0; bus < ha->ports; bus++)
  2035. status |= qla1280_config_bus(ha, bus);
  2036. if (status)
  2037. dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
  2038. LEAVE("qla1280_nvram_config");
  2039. return status;
  2040. }
  2041. /*
  2042. * Get NVRAM data word
  2043. * Calculates word position in NVRAM and calls request routine to
  2044. * get the word from NVRAM.
  2045. *
  2046. * Input:
  2047. * ha = adapter block pointer.
  2048. * address = NVRAM word address.
  2049. *
  2050. * Returns:
  2051. * data word.
  2052. */
  2053. static uint16_t
  2054. qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
  2055. {
  2056. uint32_t nv_cmd;
  2057. uint16_t data;
  2058. nv_cmd = address << 16;
  2059. nv_cmd |= NV_READ_OP;
  2060. data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
  2061. dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
  2062. "0x%x", data);
  2063. return data;
  2064. }
  2065. /*
  2066. * NVRAM request
  2067. * Sends read command to NVRAM and gets data from NVRAM.
  2068. *
  2069. * Input:
  2070. * ha = adapter block pointer.
  2071. * nv_cmd = Bit 26 = start bit
  2072. * Bit 25, 24 = opcode
  2073. * Bit 23-16 = address
  2074. * Bit 15-0 = write data
  2075. *
  2076. * Returns:
  2077. * data word.
  2078. */
  2079. static uint16_t
  2080. qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
  2081. {
  2082. struct device_reg __iomem *reg = ha->iobase;
  2083. int cnt;
  2084. uint16_t data = 0;
  2085. uint16_t reg_data;
  2086. /* Send command to NVRAM. */
  2087. nv_cmd <<= 5;
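	/* The 11-bit command (start bit, opcode, address) was built in bits
	 * 26..16, so after the shift it is clocked out MSB-first from bit 31. */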
  2088. for (cnt = 0; cnt < 11; cnt++) {
  2089. if (nv_cmd & BIT_31)
  2090. qla1280_nv_write(ha, NV_DATA_OUT);
  2091. else
  2092. qla1280_nv_write(ha, 0);
  2093. nv_cmd <<= 1;
  2094. }
  2095. /* Read data from NVRAM. */
  2096. for (cnt = 0; cnt < 16; cnt++) {
  2097. WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
  2098. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2099. NVRAM_DELAY();
  2100. data <<= 1;
  2101. reg_data = RD_REG_WORD(&reg->nvram);
  2102. if (reg_data & NV_DATA_IN)
  2103. data |= BIT_0;
  2104. WRT_REG_WORD(&reg->nvram, NV_SELECT);
  2105. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2106. NVRAM_DELAY();
  2107. }
  2108. /* Deselect chip. */
  2109. WRT_REG_WORD(&reg->nvram, NV_DESELECT);
  2110. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2111. NVRAM_DELAY();
  2112. return data;
  2113. }
  2114. static void
  2115. qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
  2116. {
  2117. struct device_reg __iomem *reg = ha->iobase;
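	/* Clock one bit out to the serial NVRAM: present the data with the
	 * chip selected, raise the clock, then drop it again, flushing each
	 * PCI write and honouring the part's timing via NVRAM_DELAY(). */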
  2118. WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
  2119. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2120. NVRAM_DELAY();
  2121. WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
  2122. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2123. NVRAM_DELAY();
  2124. WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
  2125. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2126. NVRAM_DELAY();
  2127. }
  2128. /*
  2129. * Mailbox Command
 *	Issues a mailbox command and waits for completion.
  2131. *
  2132. * Input:
  2133. * ha = adapter block pointer.
  2134. * mr = mailbox registers to load.
  2135. * mb = data pointer for mailbox registers.
  2136. *
  2137. * Output:
  2138. * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
  2139. *
  2140. * Returns:
  2141. * 0 = success
  2142. */
  2143. static int
  2144. qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
  2145. {
  2146. struct device_reg __iomem *reg = ha->iobase;
  2147. int status = 0;
  2148. int cnt;
  2149. uint16_t *optr, *iptr;
  2150. uint16_t __iomem *mptr;
  2151. uint16_t data;
  2152. DECLARE_COMPLETION_ONSTACK(wait);
  2153. struct timer_list timer;
  2154. ENTER("qla1280_mailbox_command");
  2155. if (ha->mailbox_wait) {
  2156. printk(KERN_ERR "Warning mailbox wait already in use!\n");
  2157. }
  2158. ha->mailbox_wait = &wait;
  2159. /*
  2160. * We really should start out by verifying that the mailbox is
	 * available before we start sending the command data.
  2162. */
  2163. /* Load mailbox registers. */
  2164. mptr = (uint16_t __iomem *) &reg->mailbox0;
  2165. iptr = mb;
  2166. for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
  2167. if (mr & BIT_0) {
  2168. WRT_REG_WORD(mptr, (*iptr));
  2169. }
  2170. mr >>= 1;
  2171. mptr++;
  2172. iptr++;
  2173. }
  2174. /* Issue set host interrupt command. */
  2175. /* set up a timer just in case we're really jammed */
  2176. init_timer(&timer);
  2177. timer.expires = jiffies + 20*HZ;
  2178. timer.data = (unsigned long)ha;
  2179. timer.function = qla1280_mailbox_timeout;
  2180. add_timer(&timer);
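	/* Drop the host lock while waiting; the interrupt handler copies the
	 * result into ha->mailbox_out[] and completes 'wait'. */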
  2181. spin_unlock_irq(ha->host->host_lock);
  2182. WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
  2183. data = qla1280_debounce_register(&reg->istatus);
  2184. wait_for_completion(&wait);
  2185. del_timer_sync(&timer);
  2186. spin_lock_irq(ha->host->host_lock);
  2187. ha->mailbox_wait = NULL;
  2188. /* Check for mailbox command timeout. */
  2189. if (ha->mailbox_out[0] != MBS_CMD_CMP) {
  2190. printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
  2191. "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
  2192. "0x%04x\n",
  2193. mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
  2194. printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
  2195. RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
  2196. RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
  2197. printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
  2198. RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
  2199. RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
  2200. status = 1;
  2201. }
  2202. /* Load return mailbox registers. */
  2203. optr = mb;
  2204. iptr = (uint16_t *) &ha->mailbox_out[0];
  2205. mr = MAILBOX_REGISTER_COUNT;
  2206. memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
  2207. if (ha->flags.reset_marker)
  2208. qla1280_rst_aen(ha);
  2209. if (status)
  2210. dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
  2211. "0x%x ****\n", mb[0]);
  2212. LEAVE("qla1280_mailbox_command");
  2213. return status;
  2214. }
  2215. /*
  2216. * qla1280_poll
  2217. * Polls ISP for interrupts.
  2218. *
  2219. * Input:
  2220. * ha = adapter block pointer.
  2221. */
  2222. static void
  2223. qla1280_poll(struct scsi_qla_host *ha)
  2224. {
  2225. struct device_reg __iomem *reg = ha->iobase;
  2226. uint16_t data;
  2227. LIST_HEAD(done_q);
  2228. /* ENTER("qla1280_poll"); */
  2229. /* Check for pending interrupts. */
  2230. data = RD_REG_WORD(&reg->istatus);
  2231. if (data & RISC_INT)
  2232. qla1280_isr(ha, &done_q);
  2233. if (!ha->mailbox_wait) {
  2234. if (ha->flags.reset_marker)
  2235. qla1280_rst_aen(ha);
  2236. }
  2237. if (!list_empty(&done_q))
  2238. qla1280_done(ha);
  2239. /* LEAVE("qla1280_poll"); */
  2240. }
  2241. /*
  2242. * qla1280_bus_reset
  2243. * Issue SCSI bus reset.
  2244. *
  2245. * Input:
  2246. * ha = adapter block pointer.
  2247. * bus = SCSI bus number.
  2248. *
  2249. * Returns:
  2250. * 0 = success
  2251. */
  2252. static int
  2253. qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
  2254. {
  2255. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2256. uint16_t reset_delay;
  2257. int status;
  2258. dprintk(3, "qla1280_bus_reset: entered\n");
  2259. if (qla1280_verbose)
  2260. printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
  2261. ha->host_no, bus);
  2262. reset_delay = ha->bus_settings[bus].bus_reset_delay;
  2263. mb[0] = MBC_BUS_RESET;
  2264. mb[1] = reset_delay;
  2265. mb[2] = (uint16_t) bus;
  2266. status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2267. if (status) {
  2268. if (ha->bus_settings[bus].failed_reset_count > 2)
  2269. ha->bus_settings[bus].scsi_bus_dead = 1;
  2270. ha->bus_settings[bus].failed_reset_count++;
  2271. } else {
  2272. spin_unlock_irq(ha->host->host_lock);
  2273. ssleep(reset_delay);
  2274. spin_lock_irq(ha->host->host_lock);
  2275. ha->bus_settings[bus].scsi_bus_dead = 0;
  2276. ha->bus_settings[bus].failed_reset_count = 0;
  2277. ha->bus_settings[bus].reset_marker = 0;
  2278. /* Issue marker command. */
  2279. qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
  2280. }
  2281. /*
  2282. * We should probably call qla1280_set_target_parameters()
  2283. * here as well for all devices on the bus.
  2284. */
  2285. if (status)
  2286. dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
  2287. else
  2288. dprintk(3, "qla1280_bus_reset: exiting normally\n");
  2289. return status;
  2290. }
  2291. /*
  2292. * qla1280_device_reset
  2293. * Issue bus device reset message to the target.
  2294. *
  2295. * Input:
  2296. * ha = adapter block pointer.
  2297. * bus = SCSI BUS number.
  2298. * target = SCSI ID.
  2299. *
  2300. * Returns:
  2301. * 0 = success
  2302. */
  2303. static int
  2304. qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
  2305. {
  2306. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2307. int status;
  2308. ENTER("qla1280_device_reset");
  2309. mb[0] = MBC_ABORT_TARGET;
  2310. mb[1] = (bus ? (target | BIT_7) : target) << 8;
  2311. mb[2] = 1;
  2312. status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
  2313. /* Issue marker command. */
  2314. qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
  2315. if (status)
  2316. dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
  2317. LEAVE("qla1280_device_reset");
  2318. return status;
  2319. }
  2320. /*
  2321. * qla1280_abort_command
  2322. * Abort command aborts a specified IOCB.
  2323. *
  2324. * Input:
  2325. * ha = adapter block pointer.
 *	sp = SRB structure pointer.
  2327. *
  2328. * Returns:
  2329. * 0 = success
  2330. */
  2331. static int
  2332. qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
  2333. {
  2334. uint16_t mb[MAILBOX_REGISTER_COUNT];
  2335. unsigned int bus, target, lun;
  2336. int status;
  2337. ENTER("qla1280_abort_command");
  2338. bus = SCSI_BUS_32(sp->cmd);
  2339. target = SCSI_TCN_32(sp->cmd);
  2340. lun = SCSI_LUN_32(sp->cmd);
  2341. sp->flags |= SRB_ABORT_PENDING;
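	/* The IOCB handle is handed back to the firmware split across
	 * mb[2] (high word) and mb[3] (low word). */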
  2342. mb[0] = MBC_ABORT_COMMAND;
  2343. mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
  2344. mb[2] = handle >> 16;
  2345. mb[3] = handle & 0xffff;
  2346. status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
  2347. if (status) {
  2348. dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
  2349. sp->flags &= ~SRB_ABORT_PENDING;
  2350. }
  2351. LEAVE("qla1280_abort_command");
  2352. return status;
  2353. }
  2354. /*
  2355. * qla1280_reset_adapter
  2356. * Reset adapter.
  2357. *
  2358. * Input:
  2359. * ha = adapter block pointer.
  2360. */
  2361. static void
  2362. qla1280_reset_adapter(struct scsi_qla_host *ha)
  2363. {
  2364. struct device_reg __iomem *reg = ha->iobase;
  2365. ENTER("qla1280_reset_adapter");
  2366. /* Disable ISP chip */
  2367. ha->flags.online = 0;
  2368. WRT_REG_WORD(&reg->ictrl, ISP_RESET);
  2369. WRT_REG_WORD(&reg->host_cmd,
  2370. HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
  2371. RD_REG_WORD(&reg->id_l); /* Flush PCI write */
  2372. LEAVE("qla1280_reset_adapter");
  2373. }
  2374. /*
  2375. * Issue marker command.
  2376. * Function issues marker IOCB.
  2377. *
  2378. * Input:
  2379. * ha = adapter block pointer.
  2380. * bus = SCSI BUS number
  2381. * id = SCSI ID
  2382. * lun = SCSI LUN
  2383. * type = marker modifier
  2384. */
  2385. static void
  2386. qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
  2387. {
  2388. struct mrk_entry *pkt;
  2389. ENTER("qla1280_marker");
  2390. /* Get request packet. */
  2391. if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
  2392. pkt->entry_type = MARKER_TYPE;
  2393. pkt->lun = (uint8_t) lun;
  2394. pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
  2395. pkt->modifier = type;
  2396. pkt->entry_status = 0;
  2397. /* Issue command to ISP */
  2398. qla1280_isp_cmd(ha);
  2399. }
  2400. LEAVE("qla1280_marker");
  2401. }
  2402. /*
  2403. * qla1280_64bit_start_scsi
 *	The start SCSI routine is responsible for building request packets
 *	on the request ring and modifying the ISP input pointer.
  2406. *
  2407. * Input:
  2408. * ha = adapter block pointer.
 *	sp = SRB structure pointer.
  2410. *
  2411. * Returns:
  2412. * 0 = success, was able to issue command.
  2413. */
  2414. #ifdef QLA_64BIT_PTR
  2415. static int
  2416. qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
  2417. {
  2418. struct device_reg __iomem *reg = ha->iobase;
  2419. struct scsi_cmnd *cmd = sp->cmd;
  2420. cmd_a64_entry_t *pkt;
  2421. __le32 *dword_ptr;
  2422. dma_addr_t dma_handle;
  2423. int status = 0;
  2424. int cnt;
  2425. int req_cnt;
  2426. int seg_cnt;
  2427. u8 dir;
  2428. ENTER("qla1280_64bit_start_scsi:");
  2429. /* Calculate number of entries and segments required. */
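	/* A 64-bit command entry holds two data segments; each continuation
	 * entry that follows holds five more. */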
  2430. req_cnt = 1;
  2431. seg_cnt = scsi_dma_map(cmd);
  2432. if (seg_cnt > 0) {
  2433. if (seg_cnt > 2) {
  2434. req_cnt += (seg_cnt - 2) / 5;
  2435. if ((seg_cnt - 2) % 5)
  2436. req_cnt++;
  2437. }
  2438. } else if (seg_cnt < 0) {
  2439. status = 1;
  2440. goto out;
  2441. }
  2442. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2443. /* Calculate number of free request entries. */
  2444. cnt = RD_REG_WORD(&reg->mailbox4);
  2445. if (ha->req_ring_index < cnt)
  2446. ha->req_q_cnt = cnt - ha->req_ring_index;
  2447. else
  2448. ha->req_q_cnt =
  2449. REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
  2450. }
  2451. dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
  2452. ha->req_q_cnt, seg_cnt);
  2453. /* If room for request in request ring. */
  2454. if ((req_cnt + 2) >= ha->req_q_cnt) {
  2455. status = SCSI_MLQUEUE_HOST_BUSY;
  2456. dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
  2457. "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
  2458. req_cnt);
  2459. goto out;
  2460. }
  2461. /* Check for room in outstanding command list. */
  2462. for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
  2463. ha->outstanding_cmds[cnt] != NULL; cnt++);
  2464. if (cnt >= MAX_OUTSTANDING_COMMANDS) {
  2465. status = SCSI_MLQUEUE_HOST_BUSY;
  2466. dprintk(2, "qla1280_start_scsi: NO ROOM IN "
  2467. "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
  2468. goto out;
  2469. }
  2470. ha->outstanding_cmds[cnt] = sp;
  2471. ha->req_q_cnt -= req_cnt;
  2472. CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
  2473. dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
  2474. cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
  2475. dprintk(2, " bus %i, target %i, lun %i\n",
  2476. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2477. qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
  2478. /*
  2479. * Build command packet.
  2480. */
  2481. pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
  2482. pkt->entry_type = COMMAND_A64_TYPE;
  2483. pkt->entry_count = (uint8_t) req_cnt;
  2484. pkt->sys_define = (uint8_t) ha->req_ring_index;
  2485. pkt->entry_status = 0;
  2486. pkt->handle = cpu_to_le32(cnt);
  2487. /* Zero out remaining portion of packet. */
  2488. memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
  2489. /* Set ISP command timeout. */
  2490. pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
  2491. /* Set device target ID and LUN */
  2492. pkt->lun = SCSI_LUN_32(cmd);
  2493. pkt->target = SCSI_BUS_32(cmd) ?
  2494. (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
  2495. /* Enable simple tag queuing if device supports it. */
  2496. if (cmd->device->simple_tags)
  2497. pkt->control_flags |= cpu_to_le16(BIT_3);
  2498. /* Load SCSI command packet. */
  2499. pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
  2500. memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
  2501. /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
  2502. /* Set transfer direction. */
  2503. dir = qla1280_data_direction(cmd);
  2504. pkt->control_flags |= cpu_to_le16(dir);
  2505. /* Set total data segment count. */
  2506. pkt->dseg_count = cpu_to_le16(seg_cnt);
  2507. /*
  2508. * Load data segments.
  2509. */
  2510. if (seg_cnt) { /* If data transfer. */
  2511. struct scatterlist *sg, *s;
  2512. int remseg = seg_cnt;
  2513. sg = scsi_sglist(cmd);
  2514. /* Setup packet address segment pointer. */
  2515. dword_ptr = (u32 *)&pkt->dseg_0_address;
  2516. /* Load command entry data segments. */
  2517. for_each_sg(sg, s, seg_cnt, cnt) {
  2518. if (cnt == 2)
  2519. break;
  2520. dma_handle = sg_dma_address(s);
  2521. #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
  2522. if (ha->flags.use_pci_vchannel)
  2523. sn_pci_set_vchan(ha->pdev,
  2524. (unsigned long *)&dma_handle,
  2525. SCSI_BUS_32(cmd));
  2526. #endif
  2527. *dword_ptr++ =
  2528. cpu_to_le32(pci_dma_lo32(dma_handle));
  2529. *dword_ptr++ =
  2530. cpu_to_le32(pci_dma_hi32(dma_handle));
  2531. *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
  2532. dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
  2533. cpu_to_le32(pci_dma_hi32(dma_handle)),
  2534. cpu_to_le32(pci_dma_lo32(dma_handle)),
  2535. cpu_to_le32(sg_dma_len(sg_next(s))));
  2536. remseg--;
  2537. }
  2538. dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
  2539. "command packet data - b %i, t %i, l %i \n",
  2540. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
  2541. SCSI_LUN_32(cmd));
  2542. qla1280_dump_buffer(5, (char *)pkt,
  2543. REQUEST_ENTRY_SIZE);
  2544. /*
  2545. * Build continuation packets.
  2546. */
  2547. dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
  2548. "remains\n", seg_cnt);
  2549. while (remseg > 0) {
  2550. /* Update sg start */
  2551. sg = s;
  2552. /* Adjust ring index. */
  2553. ha->req_ring_index++;
  2554. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2555. ha->req_ring_index = 0;
  2556. ha->request_ring_ptr =
  2557. ha->request_ring;
  2558. } else
  2559. ha->request_ring_ptr++;
  2560. pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
  2561. /* Zero out packet. */
  2562. memset(pkt, 0, REQUEST_ENTRY_SIZE);
  2563. /* Load packet defaults. */
  2564. ((struct cont_a64_entry *) pkt)->entry_type =
  2565. CONTINUE_A64_TYPE;
  2566. ((struct cont_a64_entry *) pkt)->entry_count = 1;
  2567. ((struct cont_a64_entry *) pkt)->sys_define =
  2568. (uint8_t)ha->req_ring_index;
  2569. /* Setup packet address segment pointer. */
  2570. dword_ptr =
  2571. (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
  2572. /* Load continuation entry data segments. */
  2573. for_each_sg(sg, s, remseg, cnt) {
  2574. if (cnt == 5)
  2575. break;
  2576. dma_handle = sg_dma_address(s);
  2577. #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
  2578. if (ha->flags.use_pci_vchannel)
  2579. sn_pci_set_vchan(ha->pdev,
  2580. (unsigned long *)&dma_handle,
  2581. SCSI_BUS_32(cmd));
  2582. #endif
  2583. *dword_ptr++ =
  2584. cpu_to_le32(pci_dma_lo32(dma_handle));
  2585. *dword_ptr++ =
  2586. cpu_to_le32(pci_dma_hi32(dma_handle));
  2587. *dword_ptr++ =
  2588. cpu_to_le32(sg_dma_len(s));
  2589. dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
  2590. cpu_to_le32(pci_dma_hi32(dma_handle)),
  2591. cpu_to_le32(pci_dma_lo32(dma_handle)),
  2592. cpu_to_le32(sg_dma_len(s)));
  2593. }
  2594. remseg -= cnt;
  2595. dprintk(5, "qla1280_64bit_start_scsi: "
  2596. "continuation packet data - b %i, t "
  2597. "%i, l %i \n", SCSI_BUS_32(cmd),
  2598. SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2599. qla1280_dump_buffer(5, (char *)pkt,
  2600. REQUEST_ENTRY_SIZE);
  2601. }
  2602. } else { /* No data transfer */
  2603. dprintk(5, "qla1280_64bit_start_scsi: No data, command "
  2604. "packet data - b %i, t %i, l %i \n",
  2605. SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
  2606. qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
  2607. }
  2608. /* Adjust ring index. */
  2609. ha->req_ring_index++;
  2610. if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
  2611. ha->req_ring_index = 0;
  2612. ha->request_ring_ptr = ha->request_ring;
  2613. } else
  2614. ha->request_ring_ptr++;
  2615. /* Set chip new ring index. */
  2616. dprintk(2,
  2617. "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
  2618. sp->flags |= SRB_SENT;
  2619. ha->actthreads++;
  2620. WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
  2621. /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
  2622. mmiowb();
  2623. out:
  2624. if (status)
  2625. dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
  2626. else
  2627. dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
  2628. return status;
  2629. }
#else /* !QLA_64BIT_PTR */

/*
 * qla1280_32bit_start_scsi
 *	The start SCSI is responsible for building request packets on
 *	request ring and modifying ISP input pointer.
 *
 *	The Qlogic firmware interface allows every queue slot to have a SCSI
 *	command and up to 4 scatter/gather (SG) entries.  If we need more
 *	than 4 SG entries, then continuation entries are used that can
 *	hold another 7 entries each.  The start routine determines if there
 *	are enough empty slots and then builds the combination of requests
 *	needed to fulfill the OS request.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SCSI Request Block structure pointer.
 *
 * Returns:
 *	0 = success, was able to issue command.
 */
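/*
 * Worked example: a ten-segment transfer puts four segments in the command
 * entry and the remaining six in a single continuation entry, so req_cnt
 * ends up as 2.  Eighteen segments would need the command entry plus two
 * continuations (4 + 7 + 7), i.e. req_cnt = 3.
 */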
static int
qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb *sp)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct scsi_cmnd *cmd = sp->cmd;
	struct cmd_entry *pkt;
	__le32 *dword_ptr;
	int status = 0;
	int cnt;
	int req_cnt;
	int seg_cnt;
	u8 dir;

	ENTER("qla1280_32bit_start_scsi");

	dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
		cmd->cmnd[0]);

	/* Calculate number of entries and segments required. */
	req_cnt = 1;
	seg_cnt = scsi_dma_map(cmd);
	if (seg_cnt) {
		/*
		 * if greater than four sg entries then we need to allocate
		 * continuation entries
		 */
		if (seg_cnt > 4) {
			req_cnt += (seg_cnt - 4) / 7;
			if ((seg_cnt - 4) % 7)
				req_cnt++;
		}
		dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
			cmd, seg_cnt, req_cnt);
	} else if (seg_cnt < 0) {
		status = 1;
		goto out;
	}

	if ((req_cnt + 2) >= ha->req_q_cnt) {
		/* Calculate number of free request entries. */
		cnt = RD_REG_WORD(&reg->mailbox4);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt =
				REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
	}

	dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
		ha->req_q_cnt, seg_cnt);

	/* If room for request in request ring. */
	if ((req_cnt + 2) >= ha->req_q_cnt) {
		status = SCSI_MLQUEUE_HOST_BUSY;
		dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
			"req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
			ha->req_q_cnt, req_cnt);
		goto out;
	}

	/* Check for empty slot in outstanding command list. */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
		     (ha->outstanding_cmds[cnt] != 0); cnt++);

	if (cnt >= MAX_OUTSTANDING_COMMANDS) {
		status = SCSI_MLQUEUE_HOST_BUSY;
		dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
			"ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
		goto out;
	}

	CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
	ha->outstanding_cmds[cnt] = sp;
	ha->req_q_cnt -= req_cnt;

	/*
	 * Build command packet.
	 */
	pkt = (struct cmd_entry *) ha->request_ring_ptr;

	pkt->entry_type = COMMAND_TYPE;
	pkt->entry_count = (uint8_t) req_cnt;
	pkt->sys_define = (uint8_t) ha->req_ring_index;
	pkt->entry_status = 0;
	pkt->handle = cpu_to_le32(cnt);

	/* Zero out remaining portion of packet. */
	memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));

	/* Set ISP command timeout. */
	pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);

	/* Set device target ID and LUN */
	pkt->lun = SCSI_LUN_32(cmd);
	pkt->target = SCSI_BUS_32(cmd) ?
		(SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);

	/* Enable simple tag queuing if device supports it. */
	if (cmd->device->simple_tags)
		pkt->control_flags |= cpu_to_le16(BIT_3);

	/* Load SCSI command packet. */
	pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
	memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
	/*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */

	/* Set transfer direction. */
	dir = qla1280_data_direction(cmd);
	pkt->control_flags |= cpu_to_le16(dir);

	/* Set total data segment count. */
	pkt->dseg_count = cpu_to_le16(seg_cnt);

	/*
	 * Load data segments.
	 */
	if (seg_cnt) {
		struct scatterlist *sg, *s;
		int remseg = seg_cnt;

		sg = scsi_sglist(cmd);

		/* Setup packet address segment pointer. */
		dword_ptr = &pkt->dseg_0_address;

		dprintk(3, "Building S/G data segments..\n");
		qla1280_dump_buffer(1, (char *)sg, 4 * 16);

		/* Load command entry data segments. */
		for_each_sg(sg, s, seg_cnt, cnt) {
			if (cnt == 4)
				break;
			*dword_ptr++ =
				cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
			*dword_ptr++ = cpu_to_le32(sg_dma_len(s));
			dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
				(pci_dma_lo32(sg_dma_address(s))),
				(sg_dma_len(s)));
			remseg--;
		}
		/*
		 * Build continuation packets.
		 */
		dprintk(3, "S/G Building Continuation"
			"...seg_cnt=0x%x remains\n", seg_cnt);
		while (remseg > 0) {
			/* Continue from end point */
			sg = s;
			/* Adjust ring index. */
			ha->req_ring_index++;
			if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
				ha->req_ring_index = 0;
				ha->request_ring_ptr =
					ha->request_ring;
			} else
				ha->request_ring_ptr++;

			pkt = (struct cmd_entry *)ha->request_ring_ptr;

			/* Zero out packet. */
			memset(pkt, 0, REQUEST_ENTRY_SIZE);

			/* Load packet defaults. */
			((struct cont_entry *) pkt)->
				entry_type = CONTINUE_TYPE;
			((struct cont_entry *) pkt)->entry_count = 1;

			((struct cont_entry *) pkt)->sys_define =
				(uint8_t) ha->req_ring_index;

			/* Setup packet address segment pointer. */
			dword_ptr =
				&((struct cont_entry *) pkt)->dseg_0_address;

			/* Load continuation entry data segments. */
			for_each_sg(sg, s, remseg, cnt) {
				if (cnt == 7)
					break;
				*dword_ptr++ =
					cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
				*dword_ptr++ =
					cpu_to_le32(sg_dma_len(s));
				dprintk(1,
					"S/G Segment Cont. phys_addr=0x%x, "
					"len=0x%x\n",
					cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
					cpu_to_le32(sg_dma_len(s)));
			}
			remseg -= cnt;
			dprintk(5, "qla1280_32bit_start_scsi: "
				"continuation packet data - "
				"scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
				SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
			qla1280_dump_buffer(5, (char *)pkt,
					    REQUEST_ENTRY_SIZE);
		}
	} else {	/* No data transfer at all */
		dprintk(5, "qla1280_32bit_start_scsi: No data, command "
			"packet data - \n");
		qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
	}

	dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
	qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
			    REQUEST_ENTRY_SIZE);

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/* Set chip new ring index. */
	dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
		"for pending command\n");
	sp->flags |= SRB_SENT;
	ha->actthreads++;
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
	/* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
	mmiowb();

 out:
	if (status)
		dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");

	LEAVE("qla1280_32bit_start_scsi");

	return status;
}
#endif

/*
 * qla1280_req_pkt
 *	Function is responsible for locking ring and
 *	getting a zeroed out request packet.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = failed to get slot.
 */
static request_t *
qla1280_req_pkt(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	request_t *pkt = NULL;
	int cnt;
	uint32_t timer;

	ENTER("qla1280_req_pkt");

	/*
	 * This can be called from interrupt context, damn it!!!
	 */
	/* Wait for 30 seconds for slot. */
	for (timer = 15000000; timer; timer--) {
		if (ha->req_q_cnt > 0) {
			/* Calculate number of free request entries. */
			cnt = RD_REG_WORD(&reg->mailbox4);
			if (ha->req_ring_index < cnt)
				ha->req_q_cnt = cnt - ha->req_ring_index;
			else
				ha->req_q_cnt =
					REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
		}

		/* Found empty request ring slot? */
		if (ha->req_q_cnt > 0) {
			ha->req_q_cnt--;
			pkt = ha->request_ring_ptr;

			/* Zero out packet. */
			memset(pkt, 0, REQUEST_ENTRY_SIZE);

			/*
			 * How can this be right when we have a ring
			 * size of 512???
			 */
			/* Set system defined field. */
			pkt->sys_define = (uint8_t) ha->req_ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		udelay(2);	/* 10 */

		/* Check for pending interrupts. */
		qla1280_poll(ha);
	}

	if (!pkt)
		dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
	else
		dprintk(3, "qla1280_req_pkt: exiting normally\n");

	return pkt;
}

/*
 * qla1280_isp_cmd
 *	Function is responsible for modifying ISP input pointer.
 *	Releases ring lock.
 *
 * Input:
 *	ha = adapter block pointer.
 */
static void
qla1280_isp_cmd(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;

	ENTER("qla1280_isp_cmd");

	dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
	qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
			    REQUEST_ENTRY_SIZE);

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	/*
	 * Update request index to mailbox4 (Request Queue In).
	 * The mmiowb() ensures that this write is ordered with writes by other
	 * CPUs.  Without the mmiowb(), it is possible for the following:
	 *    CPUA posts write of index 5 to mailbox4
	 *    CPUA releases host lock
	 *    CPUB acquires host lock
	 *    CPUB posts write of index 6 to mailbox4
	 *    On PCI bus, order reverses and write of 6 posts, then index 5,
	 *       causing chip to issue full queue of stale commands
	 * The mmiowb() prevents future writes from crossing the barrier.
	 * See Documentation/DocBook/deviceiobook.tmpl for more information.
	 */
	WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
	mmiowb();

	LEAVE("qla1280_isp_cmd");
}

/****************************************************************************/
/*                        Interrupt Service Routine.                        */
/****************************************************************************/

/****************************************************************************
 *  qla1280_isr
 *      Calls I/O done on command completion.
 *
 * Input:
 *      ha           = adapter block pointer.
 *      done_q       = done queue.
 ****************************************************************************/
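/*
 * Rough flow: read the interrupt status and bail out if neither RISC_INT
 * nor PCI_INT is set.  If the semaphore register has BIT_0 set, the RISC
 * has posted a mailbox/asynchronous event, which is decoded from mailbox
 * registers 0-7; otherwise the interrupt is simply cleared.  Finally the
 * response ring is walked until rsp_ring_index catches up with mailbox5
 * (the ISP's response queue in-pointer), queueing each completed srb on
 * done_q.
 */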
static void
qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct response *pkt;
	struct srb *sp = NULL;
	uint16_t mailbox[MAILBOX_REGISTER_COUNT];
	uint16_t *wptr;
	uint32_t index;
	u16 istatus;

	ENTER("qla1280_isr");

	istatus = RD_REG_WORD(&reg->istatus);
	if (!(istatus & (RISC_INT | PCI_INT)))
		return;

	/* Save mailbox register 5 */
	mailbox[5] = RD_REG_WORD(&reg->mailbox5);

	/* Check for mailbox interrupt. */
	mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);

	if (mailbox[0] & BIT_0) {
		/* Get mailbox data. */
		/* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */

		wptr = &mailbox[0];
		*wptr++ = RD_REG_WORD(&reg->mailbox0);
		*wptr++ = RD_REG_WORD(&reg->mailbox1);
		*wptr = RD_REG_WORD(&reg->mailbox2);
		if (mailbox[0] != MBA_SCSI_COMPLETION) {
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox3);
			*wptr++ = RD_REG_WORD(&reg->mailbox4);
			wptr++;
			*wptr++ = RD_REG_WORD(&reg->mailbox6);
			*wptr = RD_REG_WORD(&reg->mailbox7);
		}

		/* Release mailbox registers. */
		WRT_REG_WORD(&reg->semaphore, 0);
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);

		dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
			mailbox[0]);

		/* Handle asynchronous event */
		switch (mailbox[0]) {
		case MBA_SCSI_COMPLETION:	/* Response completion */
			dprintk(5, "qla1280_isr: mailbox SCSI response "
				"completion\n");

			if (ha->flags.online) {
				/* Get outstanding command index. */
				index = mailbox[2] << 16 | mailbox[1];

				/* Validate handle. */
				if (index < MAX_OUTSTANDING_COMMANDS)
					sp = ha->outstanding_cmds[index];
				else
					sp = NULL;

				if (sp) {
					/* Free outstanding command slot. */
					ha->outstanding_cmds[index] = NULL;

					/* Save ISP completion status */
					CMD_RESULT(sp->cmd) = 0;
					CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

					/* Place block on done queue */
					list_add_tail(&sp->list, done_q);
				} else {
					/*
					 * If we get here we have a real problem!
					 */
					printk(KERN_WARNING
					       "qla1280: ISP invalid handle\n");
				}
			}
			break;

		case MBA_BUS_RESET:	/* SCSI Bus Reset */
			ha->flags.reset_marker = 1;
			index = mailbox[6] & BIT_0;
			ha->bus_settings[index].reset_marker = 1;

			printk(KERN_DEBUG "qla1280_isr(): index %i "
			       "asynchronous BUS_RESET\n", index);
			break;

		case MBA_SYSTEM_ERR:	/* System Error */
			printk(KERN_WARNING
			       "qla1280: ISP System Error - mbx1=%xh, mbx2="
			       "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
			       mailbox[3]);
			break;

		case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
			printk(KERN_WARNING
			       "qla1280: ISP Request Transfer Error\n");
			break;

		case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
			printk(KERN_WARNING
			       "qla1280: ISP Response Transfer Error\n");
			break;

		case MBA_WAKEUP_THRES:	/* Request Queue Wake-up */
			dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
			break;

		case MBA_TIMEOUT_RESET:	/* Execution Timeout Reset */
			dprintk(2,
				"qla1280_isr: asynchronous TIMEOUT_RESET\n");
			break;

		case MBA_DEVICE_RESET:	/* Bus Device Reset */
			printk(KERN_INFO "qla1280_isr(): asynchronous "
			       "BUS_DEVICE_RESET\n");

			ha->flags.reset_marker = 1;
			index = mailbox[6] & BIT_0;
			ha->bus_settings[index].reset_marker = 1;
			break;

		case MBA_BUS_MODE_CHANGE:
			dprintk(2,
				"qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
			break;

		default:
			/* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
			if (mailbox[0] < MBA_ASYNC_EVENT) {
				wptr = &mailbox[0];
				memcpy((uint16_t *) ha->mailbox_out, wptr,
				       MAILBOX_REGISTER_COUNT *
				       sizeof(uint16_t));

				if(ha->mailbox_wait != NULL)
					complete(ha->mailbox_wait);
			}
			break;
		}
	} else {
		WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
	}

	/*
	 * We will receive interrupts during mailbox testing prior to
	 * the card being marked online, hence the double check.
	 */
	if (!(ha->flags.online && !ha->mailbox_wait)) {
		dprintk(2, "qla1280_isr: Response pointer Error\n");
		goto out;
	}

	if (mailbox[5] >= RESPONSE_ENTRY_CNT)
		goto out;

	while (ha->rsp_ring_index != mailbox[5]) {
		pkt = ha->response_ring_ptr;

		dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
			" = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
		dprintk(5, "qla1280_isr: response packet data\n");
		qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);

		if (pkt->entry_type == STATUS_TYPE) {
			if ((le16_to_cpu(pkt->scsi_status) & 0xff)
			    || pkt->comp_status || pkt->entry_status) {
				dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
					"0x%x mailbox[5] = 0x%x, comp_status "
					"= 0x%x, scsi_status = 0x%x\n",
					ha->rsp_ring_index, mailbox[5],
					le16_to_cpu(pkt->comp_status),
					le16_to_cpu(pkt->scsi_status));
			}
		} else {
			dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
				"0x%x, mailbox[5] = 0x%x\n",
				ha->rsp_ring_index, mailbox[5]);
			dprintk(2, "qla1280_isr: response packet data\n");
			qla1280_dump_buffer(2, (char *)pkt,
					    RESPONSE_ENTRY_SIZE);
		}

		if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
			dprintk(2, "status: Cmd %p, handle %i\n",
				ha->outstanding_cmds[pkt->handle]->cmd,
				pkt->handle);
			if (pkt->entry_type == STATUS_TYPE)
				qla1280_status_entry(ha, pkt, done_q);
			else
				qla1280_error_entry(ha, pkt, done_q);
			/* Adjust ring index. */
			ha->rsp_ring_index++;
			if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
				ha->rsp_ring_index = 0;
				ha->response_ring_ptr = ha->response_ring;
			} else
				ha->response_ring_ptr++;
			WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
		}
	}

 out:
	LEAVE("qla1280_isr");
}

/*
 *  qla1280_rst_aen
 *      Processes asynchronous reset.
 *
 * Input:
 *      ha  = adapter block pointer.
 */
static void
qla1280_rst_aen(struct scsi_qla_host *ha)
{
	uint8_t bus;

	ENTER("qla1280_rst_aen");

	if (ha->flags.online && !ha->flags.reset_active &&
	    !ha->flags.abort_isp_active) {
		ha->flags.reset_active = 1;
		while (ha->flags.reset_marker) {
			/* Issue marker command. */
			ha->flags.reset_marker = 0;
			for (bus = 0; bus < ha->ports &&
				     !ha->flags.reset_marker; bus++) {
				if (ha->bus_settings[bus].reset_marker) {
					ha->bus_settings[bus].reset_marker = 0;
					qla1280_marker(ha, bus, 0, 0,
						       MK_SYNC_ALL);
				}
			}
		}
	}

	LEAVE("qla1280_rst_aen");
}

/*
 *  qla1280_status_entry
 *      Processes received ISP status entry.
 *
 * Input:
 *      ha           = adapter block pointer.
 *      pkt          = entry pointer.
 *      done_q       = done queue.
 */
static void
qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
		     struct list_head *done_q)
{
	unsigned int bus, target, lun;
	int sense_sz;
	struct srb *sp;
	struct scsi_cmnd *cmd;
	uint32_t handle = le32_to_cpu(pkt->handle);
	uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
	uint16_t comp_status = le16_to_cpu(pkt->comp_status);

	ENTER("qla1280_status_entry");

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[handle];
	else
		sp = NULL;

	if (!sp) {
		printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
		goto out;
	}

	/* Free outstanding command slot. */
	ha->outstanding_cmds[handle] = NULL;

	cmd = sp->cmd;

	/* Generate LU queue on cntrl, target, LUN */
	bus = SCSI_BUS_32(cmd);
	target = SCSI_TCN_32(cmd);
	lun = SCSI_LUN_32(cmd);

	if (comp_status || scsi_status) {
		dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
			"0x%x, handle = 0x%x\n", comp_status,
			scsi_status, handle);
	}

	/* Target busy or queue full */
	if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
	    (scsi_status & 0xFF) == SAM_STAT_BUSY) {
		CMD_RESULT(cmd) = scsi_status & 0xff;
	} else {

		/* Save ISP completion status */
		CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);

		if (scsi_status & SAM_STAT_CHECK_CONDITION) {
			if (comp_status != CS_ARS_FAILED) {
				uint16_t req_sense_length =
					le16_to_cpu(pkt->req_sense_length);
				if (req_sense_length < CMD_SNSLEN(cmd))
					sense_sz = req_sense_length;
				else
					/*
					 * scsi_cmnd->sense_buffer is
					 * 64 bytes, why only copy 63?
					 * This looks wrong! /Jes
					 */
					sense_sz = CMD_SNSLEN(cmd) - 1;

				memcpy(cmd->sense_buffer,
				       &pkt->req_sense_data, sense_sz);
			} else
				sense_sz = 0;
			memset(cmd->sense_buffer + sense_sz, 0,
			       SCSI_SENSE_BUFFERSIZE - sense_sz);

			dprintk(2, "qla1280_status_entry: Check "
				"condition Sense data, b %i, t %i, "
				"l %i\n", bus, target, lun);
			if (sense_sz)
				qla1280_dump_buffer(2,
						    (char *)cmd->sense_buffer,
						    sense_sz);
		}
	}

	CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

	/* Place command on done queue. */
	list_add_tail(&sp->list, done_q);

 out:
	LEAVE("qla1280_status_entry");
}

/*
 *  qla1280_error_entry
 *      Processes error entry.
 *
 * Input:
 *      ha           = adapter block pointer.
 *      pkt          = entry pointer.
 *      done_q       = done queue.
 */
static void
qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
		    struct list_head *done_q)
{
	struct srb *sp;
	uint32_t handle = le32_to_cpu(pkt->handle);

	ENTER("qla1280_error_entry");

	if (pkt->entry_status & BIT_3)
		dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
	else if (pkt->entry_status & BIT_2)
		dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
	else if (pkt->entry_status & BIT_1)
		dprintk(2, "qla1280_error_entry: FULL flag error\n");
	else
		dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status & (BIT_3 + BIT_2)) {
			/* Bad payload or header, set error status. */
			/* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
			CMD_RESULT(sp->cmd) = DID_ERROR << 16;
		} else if (pkt->entry_status & BIT_1) {	/* FULL flag */
			CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
		} else {
			/* Set error status. */
			CMD_RESULT(sp->cmd) = DID_ERROR << 16;
		}

		CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

		/* Place command on done queue. */
		list_add_tail(&sp->list, done_q);
	}
#ifdef QLA_64BIT_PTR
	else if (pkt->entry_type == COMMAND_A64_TYPE) {
		printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
	}
#endif

	LEAVE("qla1280_error_entry");
}

/*
 *  qla1280_abort_isp
 *      Resets ISP and aborts all outstanding commands.
 *
 * Input:
 *      ha           = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
static int
qla1280_abort_isp(struct scsi_qla_host *ha)
{
	struct device_reg __iomem *reg = ha->iobase;
	struct srb *sp;
	int status = 0;
	int cnt;
	int bus;

	ENTER("qla1280_abort_isp");

	if (ha->flags.abort_isp_active || !ha->flags.online)
		goto out;

	ha->flags.abort_isp_active = 1;

	/* Disable ISP interrupts. */
	qla1280_disable_intrs(ha);
	WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
	RD_REG_WORD(&reg->id_l);

	printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
	       ha->host_no);
	/* Dequeue all commands in outstanding command list. */
	for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
		struct scsi_cmnd *cmd;

		sp = ha->outstanding_cmds[cnt];
		if (sp) {
			cmd = sp->cmd;
			CMD_RESULT(cmd) = DID_RESET << 16;
			CMD_HANDLE(cmd) = COMPLETED_HANDLE;
			ha->outstanding_cmds[cnt] = NULL;
			list_add_tail(&sp->list, &ha->done_q);
		}
	}

	qla1280_done(ha);

	status = qla1280_load_firmware(ha);
	if (status)
		goto out;

	/* Setup adapter based on NVRAM parameters. */
	qla1280_nvram_config (ha);

	status = qla1280_init_rings(ha);
	if (status)
		goto out;

	/* Issue SCSI reset. */
	for (bus = 0; bus < ha->ports; bus++)
		qla1280_bus_reset(ha, bus);

	ha->flags.abort_isp_active = 0;
 out:
	if (status) {
		printk(KERN_WARNING
		       "qla1280: ISP error recovery failed, board disabled\n");
		qla1280_reset_adapter(ha);
		dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
	}

	LEAVE("qla1280_abort_isp");
	return status;
}

/*
 * qla1280_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static u16
qla1280_debounce_register(volatile u16 __iomem * addr)
{
	volatile u16 ret;
	volatile u16 ret2;

	ret = RD_REG_WORD(addr);
	ret2 = RD_REG_WORD(addr);

	if (ret == ret2)
		return ret;

	do {
		cpu_relax();
		ret = RD_REG_WORD(addr);
		ret2 = RD_REG_WORD(addr);
	} while (ret != ret2);

	return ret;
}

/************************************************************************
 * qla1280_check_for_dead_scsi_bus                                      *
 *                                                                      *
 *    This routine checks for a dead SCSI bus                           *
 ************************************************************************/
#define SET_SXP_BANK            0x0100
#define SCSI_PHASE_INVALID      0x87FF
static int
qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
{
	uint16_t config_reg, scsi_control;
	struct device_reg __iomem *reg = ha->iobase;

	if (ha->bus_settings[bus].scsi_bus_dead) {
		WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
		config_reg = RD_REG_WORD(&reg->cfg_1);
		WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
		scsi_control = RD_REG_WORD(&reg->scsiControlPins);
		WRT_REG_WORD(&reg->cfg_1, config_reg);
		WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);

		if (scsi_control == SCSI_PHASE_INVALID) {
			ha->bus_settings[bus].scsi_bus_dead = 1;
			return 1;	/* bus is dead */
		} else {
			ha->bus_settings[bus].scsi_bus_dead = 0;
			ha->bus_settings[bus].failed_reset_count = 0;
		}
	}
	return 0;	/* bus is not dead */
}

static void
qla1280_get_target_parameters(struct scsi_qla_host *ha,
			      struct scsi_device *device)
{
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	int bus, target, lun;

	bus = device->channel;
	target = device->id;
	lun = device->lun;

	mb[0] = MBC_GET_TARGET_PARAMETERS;
	mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
	mb[1] <<= 8;
	qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
				&mb[0]);

	printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);

	if (mb[3] != 0) {
		printk(" Sync: period %d, offset %d",
		       (mb[3] & 0xff), (mb[3] >> 8));
		if (mb[2] & BIT_13)
			printk(", Wide");
		if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
			printk(", DT");
	} else
		printk(" Async");

	if (device->simple_tags)
		printk(", Tagged queuing: depth %d", device->queue_depth);
	printk("\n");
}

#if DEBUG_QLA1280
static void
__qla1280_dump_buffer(char *b, int size)
{
	int cnt;
	u8 c;

	printk(KERN_DEBUG " 0   1   2   3   4   5   6   7   8   9   Ah  "
	       "Bh  Ch  Dh  Eh  Fh\n");
	printk(KERN_DEBUG "---------------------------------------------"
	       "------------------\n");

	for (cnt = 0; cnt < size;) {
		c = *b++;

		printk("0x%02x", c);
		cnt++;
		if (!(cnt % 16))
			printk("\n");
		else
			printk("  ");
	}
	if (cnt % 16)
		printk("\n");
}

/**************************************************************************
 *   ql1280_print_scsi_cmd
 *
 **************************************************************************/
static void
__qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_qla_host *ha;
	struct Scsi_Host *host = CMD_HOST(cmd);
	struct srb *sp;
	/* struct scatterlist *sg; */
	int i;

	ha = (struct scsi_qla_host *)host->hostdata;

	sp = (struct srb *)CMD_SP(cmd);
	printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
	printk("  chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
	       SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
	       CMD_CDBLEN(cmd));
	printk(" CDB = ");
	for (i = 0; i < cmd->cmd_len; i++) {
		printk("0x%02x ", cmd->cmnd[i]);
	}
	printk("  seg_cnt =%d\n", scsi_sg_count(cmd));
	printk("  request buffer=0x%p, request buffer len=0x%x\n",
	       scsi_sglist(cmd), scsi_bufflen(cmd));
	/* if (cmd->use_sg)
	   {
	   sg = (struct scatterlist *) cmd->request_buffer;
	   printk("  SG buffer: \n");
	   qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
	   } */
	printk("  tag=%d, transfersize=0x%x \n",
	       cmd->tag, cmd->transfersize);
	printk("  SP=0x%p\n", CMD_SP(cmd));
	printk(" underflow size = 0x%x, direction=0x%x\n",
	       cmd->underflow, cmd->sc_data_direction);
}

/**************************************************************************
 *   ql1280_dump_device
 *
 **************************************************************************/
static void
ql1280_dump_device(struct scsi_qla_host *ha)
{
	struct scsi_cmnd *cp;
	struct srb *sp;
	int i;

	printk(KERN_DEBUG "Outstanding Commands on controller:\n");

	for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
		if ((sp = ha->outstanding_cmds[i]) == NULL)
			continue;
		if ((cp = sp->cmd) == NULL)
			continue;
		qla1280_print_scsi_cmd(1, cp);
	}
}
#endif

enum tokens {
	TOKEN_NVRAM,
	TOKEN_SYNC,
	TOKEN_WIDE,
	TOKEN_PPR,
	TOKEN_VERBOSE,
	TOKEN_DEBUG,
};

struct setup_tokens {
	char *token;
	int val;
};

static struct setup_tokens setup_token[] __initdata =
{
	{ "nvram", TOKEN_NVRAM },
	{ "sync", TOKEN_SYNC },
	{ "wide", TOKEN_WIDE },
	{ "ppr", TOKEN_PPR },
	{ "verbose", TOKEN_VERBOSE },
	{ "debug", TOKEN_DEBUG },
};

/**************************************************************************
 *   qla1280_setup
 *
 *   Handle boot parameters. This really needs to be changed so one
 *   can specify per adapter parameters.
 **************************************************************************/
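/*
 * The string is parsed as a ';'-separated list of "token:value" pairs,
 * where value is "yes", "no" or a number and token is one of the entries
 * in setup_token[] above; for example, something like
 * "qla1280=wide:0;verbose:1" on the kernel command line would disable
 * wide negotiation and enable verbose output.
 */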
static int __init
qla1280_setup(char *s)
{
	char *cp, *ptr;
	unsigned long val;
	int toke;

	cp = s;

	while (cp && (ptr = strchr(cp, ':'))) {
		ptr++;
		if (!strcmp(ptr, "yes")) {
			val = 0x10000;
			ptr += 3;
		} else if (!strcmp(ptr, "no")) {
			val = 0;
			ptr += 2;
		} else
			val = simple_strtoul(ptr, &ptr, 0);

		switch ((toke = qla1280_get_token(cp))) {
		case TOKEN_NVRAM:
			if (!val)
				driver_setup.no_nvram = 1;
			break;
		case TOKEN_SYNC:
			if (!val)
				driver_setup.no_sync = 1;
			else if (val != 0x10000)
				driver_setup.sync_mask = val;
			break;
		case TOKEN_WIDE:
			if (!val)
				driver_setup.no_wide = 1;
			else if (val != 0x10000)
				driver_setup.wide_mask = val;
			break;
		case TOKEN_PPR:
			if (!val)
				driver_setup.no_ppr = 1;
			else if (val != 0x10000)
				driver_setup.ppr_mask = val;
			break;
		case TOKEN_VERBOSE:
			qla1280_verbose = val;
			break;
		default:
			printk(KERN_INFO "qla1280: unknown boot option %s\n",
			       cp);
		}

		cp = strchr(ptr, ';');
		if (cp)
			cp++;
		else {
			break;
		}
	}
	return 1;
}

static int __init
qla1280_get_token(char *str)
{
	char *sep;
	long ret = -1;
	int i;

	sep = strchr(str, ':');

	if (sep) {
		for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
			if (!strncmp(setup_token[i].token, str, (sep - str))) {
				ret = setup_token[i].val;
				break;
			}
		}
	}

	return ret;
}

static struct scsi_host_template qla1280_driver_template = {
	.module			= THIS_MODULE,
	.proc_name		= "qla1280",
	.name			= "Qlogic ISP 1280/12160",
	.info			= qla1280_info,
	.slave_configure	= qla1280_slave_configure,
	.queuecommand		= qla1280_queuecommand,
	.eh_abort_handler	= qla1280_eh_abort,
	.eh_device_reset_handler= qla1280_eh_device_reset,
	.eh_bus_reset_handler	= qla1280_eh_bus_reset,
	.eh_host_reset_handler	= qla1280_eh_adapter_reset,
	.bios_param		= qla1280_biosparam,
	.can_queue		= 0xfffff,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};

static int __devinit
qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int devnum = id->driver_data;
	struct qla_boards *bdp = &ql1280_board_tbl[devnum];
	struct Scsi_Host *host;
	struct scsi_qla_host *ha;
	int error = -ENODEV;

	/* Bypass all AMI SUBSYS VENDOR IDs */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
		printk(KERN_INFO
		       "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
		goto error;
	}

	printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
	       bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));

	if (pci_enable_device(pdev)) {
		printk(KERN_WARNING
		       "qla1280: Failed to enable pci device, aborting.\n");
		goto error;
	}

	pci_set_master(pdev);

	error = -ENOMEM;
	host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
	if (!host) {
		printk(KERN_WARNING
		       "qla1280: Failed to register host, aborting.\n");
		goto error_disable_device;
	}

	ha = (struct scsi_qla_host *)host->hostdata;
	memset(ha, 0, sizeof(struct scsi_qla_host));

	ha->pdev = pdev;
	ha->devnum = devnum;	/* specifies microcode load address */

#ifdef QLA_64BIT_PTR
	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
			printk(KERN_WARNING "scsi(%li): Unable to set a "
			       "suitable DMA mask - aborting\n", ha->host_no);
			error = -ENODEV;
			goto error_put_host;
		}
	} else
		dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
			ha->host_no);
#else
	if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
		printk(KERN_WARNING "scsi(%li): Unable to set a "
		       "suitable DMA mask - aborting\n", ha->host_no);
		error = -ENODEV;
		goto error_put_host;
	}
#endif

	ha->request_ring = pci_alloc_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			&ha->request_dma);
	if (!ha->request_ring) {
		printk(KERN_INFO "qla1280: Failed to get request memory\n");
		goto error_put_host;
	}

	ha->response_ring = pci_alloc_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			&ha->response_dma);
	if (!ha->response_ring) {
		printk(KERN_INFO "qla1280: Failed to get response memory\n");
		goto error_free_request_ring;
	}

	ha->ports = bdp->numPorts;

	ha->host = host;
	ha->host_no = host->host_no;

	host->irq = pdev->irq;
	host->max_channel = bdp->numPorts - 1;
	host->max_lun = MAX_LUNS - 1;
	host->max_id = MAX_TARGETS;
	host->max_sectors = 1024;
	host->unique_id = host->host_no;

	error = -ENODEV;

#if MEMORY_MAPPED_IO
	ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
	if (!ha->mmpbase) {
		printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
		goto error_free_response_ring;
	}

	host->base = (unsigned long)ha->mmpbase;
	ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
#else
	host->io_port = pci_resource_start(ha->pdev, 0);
	if (!request_region(host->io_port, 0xff, "qla1280")) {
		printk(KERN_INFO "qla1280: Failed to reserve i/o region "
		       "0x%04lx-0x%04lx - already in use\n",
		       host->io_port, host->io_port + 0xff);
		goto error_free_response_ring;
	}

	ha->iobase = (struct device_reg *)host->io_port;
#endif

	INIT_LIST_HEAD(&ha->done_q);

	/* Disable ISP interrupts. */
	qla1280_disable_intrs(ha);

	if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
				"qla1280", ha)) {
		printk("qla1280 : Failed to reserve interrupt %d already "
		       "in use\n", pdev->irq);
		goto error_release_region;
	}

	/* load the F/W, read parameters, and init the H/W */
	if (qla1280_initialize_adapter(ha)) {
		printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
		goto error_free_irq;
	}

	/* set our host ID (need to do something about our two IDs) */
	host->this_id = ha->bus_settings[0].id;

	pci_set_drvdata(pdev, host);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto error_disable_adapter;
	scsi_scan_host(host);

	return 0;

 error_disable_adapter:
	qla1280_disable_intrs(ha);
 error_free_irq:
	free_irq(pdev->irq, ha);
 error_release_region:
#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif
 error_free_response_ring:
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			ha->response_ring, ha->response_dma);
 error_free_request_ring:
	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			ha->request_ring, ha->request_dma);
 error_put_host:
	scsi_host_put(host);
 error_disable_device:
	pci_disable_device(pdev);
 error:
	return error;
}

static void __devexit
qla1280_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

	scsi_remove_host(host);

	qla1280_disable_intrs(ha);

	free_irq(pdev->irq, ha);

#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif

	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
			ha->request_ring, ha->request_dma);
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
			ha->response_ring, ha->response_dma);

	pci_disable_device(pdev);

	scsi_host_put(host);
}

static struct pci_driver qla1280_pci_driver = {
	.name		= "qla1280",
	.id_table	= qla1280_pci_tbl,
	.probe		= qla1280_probe_one,
	.remove		= __devexit_p(qla1280_remove_one),
};

static int __init
qla1280_init(void)
{
	if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
		printk(KERN_WARNING
		       "qla1280: struct srb too big, aborting\n");
		return -EINVAL;
	}

#ifdef MODULE
	/*
	 * If we are called as a module, the qla1280 pointer may not be null
	 * and it would point to our bootup string, just like on the lilo
	 * command line.  If not NULL, then process this config string with
	 * qla1280_setup
	 *
	 * Boot time Options
	 * To add options at boot time add a line to your lilo.conf file like:
	 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
	 * which will result in the first four devices on the first two
	 * controllers being set to a tagged queue depth of 255.
	 */
	if (qla1280)
		qla1280_setup(qla1280);
#endif

	return pci_register_driver(&qla1280_pci_driver);
}

static void __exit
qla1280_exit(void)
{
	int i;

	pci_unregister_driver(&qla1280_pci_driver);

	/* release any allocated firmware images */
	for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
		if (qla1280_fw_tbl[i].fw) {
			release_firmware(qla1280_fw_tbl[i].fw);
			qla1280_fw_tbl[i].fw = NULL;
		}
	}
}

module_init(qla1280_init);
module_exit(qla1280_exit);

MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("qlogic/1040.bin");
MODULE_FIRMWARE("qlogic/1280.bin");
MODULE_FIRMWARE("qlogic/12160.bin");
MODULE_VERSION(QLA1280_VERSION);

/*
 * Overrides for Emacs so that we almost follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-basic-offset: 8
 * tab-width: 8
 * End:
 */