arcmsr_hba.c

/*
*******************************************************************************
** O.S         : Linux
** FILE NAME   : arcmsr_hba.c
** BY          : Nick Cheng, C.L. Huang
** Description : SCSI RAID Device Driver for Areca RAID Controller
*******************************************************************************
** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
** E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/circ_buf.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"

MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
#define ARCMSR_SLEEPTIME	10
#define ARCMSR_RETRYCOUNT	12

static wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
	const struct pci_device_id *id);
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
static int arcmsr_resume(struct pci_dev *pdev);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(unsigned long pacb);
static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
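/*
 * Clamp the midlayer's requested queue depth to the adapter's per-LUN
 * command limit before handing it to scsi_change_queue_depth().
 */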
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	return scsi_change_queue_depth(sdev, queue_depth);
}
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module = THIS_MODULE,
	.name = "Areca SAS/SATA RAID driver",
	.info = arcmsr_info,
	.queuecommand = arcmsr_queue_command,
	.eh_abort_handler = arcmsr_abort,
	.eh_bus_reset_handler = arcmsr_bus_reset,
	.bios_param = arcmsr_bios_param,
	.change_queue_depth = arcmsr_adjust_disk_queue_depth,
	.can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id = ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors = ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = arcmsr_host_attrs,
	.no_write_same = 1,
};
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
	.name = "arcmsr",
	.id_table = arcmsr_device_id_table,
	.probe = arcmsr_probe,
	.remove = arcmsr_remove,
	.suspend = arcmsr_suspend,
	.resume = arcmsr_resume,
	.shutdown = arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/
static void arcmsr_free_mu(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D: {
		dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
			acb->dma_coherent2, acb->dma_coherent_handle2);
		break;
	}
	}
}
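/*
 * Map the controller registers. Which PCI BAR holds the message unit
 * depends on the adapter type: A and D use BAR 0, B uses BAR 0 plus
 * BAR 2, and C uses BAR 1.
 */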
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		void __iomem *mem_base0, *mem_base1;

		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&acb->pmuC->outbound_doorbell_clear); /* clear interrupt */
			return true;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		void __iomem *mem_base0;
		unsigned long addr, range, flags;

		addr = (unsigned long)pci_resource_start(pdev, 0);
		range = pci_resource_len(pdev, 0);
		flags = pci_resource_flags(pdev, 0);
		mem_base0 = ioremap(addr, range);
		if (!mem_base0) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		break;
	}
	}
	return true;
}
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		iounmap(acb->pmuA);
		break;
	case ACB_ADAPTER_TYPE_B:
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
		break;
	case ACB_ADAPTER_TYPE_C:
		iounmap(acb->pmuC);
		break;
	case ACB_ADAPTER_TYPE_D:
		iounmap(acb->mem_base0);
		break;
	}
}
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	handle_state = arcmsr_interrupt(acb);
	return handle_state;
}
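/*
 * Report a BIOS-compatible disk geometry: prefer whatever the on-disk
 * partition table implies; otherwise assume 64 heads/32 sectors, or
 * 255 heads/63 sectors once that would exceed 1024 cylinders.
 */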
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer; /* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
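/*
 * The wait_msgint_ready helpers below poll the per-chip doorbell for a
 * message-0 completion every 10 ms, up to 2000 iterations (about 20
 * seconds), acknowledging the interrupt before returning true.
 */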
static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&reg->outbound_intstatus) &
				ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
			writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
				&reg->outbound_intstatus);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->iop2drv_doorbell)
				& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
				reg->iop2drv_doorbell);
			writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
				reg->drv2iop_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&phbcmu->outbound_doorbell)
				& ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&phbcmu->outbound_doorbell_clear); /* clear interrupt */
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->outbound_doorbell)
				& ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
				reg->outbound_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (arcmsr_hbaA_wait_msgint_ready(acb))
			break;
		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
			"timeout, retry count down = %d\n",
			acb->host->host_no, retry_count);
	} while (retry_count != 0);
}

static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;

	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
	do {
		if (arcmsr_hbaB_wait_msgint_ready(acb))
			break;
		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
			"timeout, retry count down = %d\n",
			acb->host->host_no, retry_count);
	} while (retry_count != 0);
}

static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	int retry_count = 30; /* enlarge wait flush adapter cache time: 10 minute */

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	do {
		if (arcmsr_hbaC_wait_msgint_ready(pACB))
			break;
		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
			"timeout, retry count down = %d\n",
			pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}

static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
{
	int retry_count = 15;
	struct MessageUnit_D *reg = pACB->pmuD;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
	do {
		if (arcmsr_hbaD_wait_msgint_ready(pACB))
			break;
		retry_count--;
		pr_notice("arcmsr%d: wait 'flush adapter cache' timeout, "
			"retry count down = %d\n",
			pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}

static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_flush_cache(acb);
		break;
	}
}
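/*
 * Type B and type D adapters keep their MessageUnit structure in host
 * memory, so a DMA-coherent buffer is allocated here and its register
 * pointers are wired up to the mapped BARs; types A and C access the
 * message unit directly through MMIO and need no extra allocation.
 */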
static bool arcmsr_alloc_io_queue(struct AdapterControlBlock *acb)
{
	bool rtn = true;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct pci_dev *pdev = acb->pdev;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg;

		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent) {
			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
			return false;
		}
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = dma_coherent;
		reg = (struct MessageUnit_B *)dma_coherent;
		acb->pmuB = reg;
		if (acb->pdev->device == PCI_DEVICE_ID_ARECA_1203) {
			reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203);
			reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203);
			reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203);
			reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203);
		} else {
			reg->drv2iop_doorbell = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL);
			reg->drv2iop_doorbell_mask = MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK);
			reg->iop2drv_doorbell = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL);
			reg->iop2drv_doorbell_mask = MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK);
		}
		reg->message_wbuffer = MEM_BASE1(ARCMSR_MESSAGE_WBUFFER);
		reg->message_rbuffer = MEM_BASE1(ARCMSR_MESSAGE_RBUFFER);
		reg->message_rwbuffer = MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg;

		acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
		dma_coherent = dma_zalloc_coherent(&pdev->dev, acb->roundup_ccbsize,
			&dma_coherent_handle, GFP_KERNEL);
		if (!dma_coherent) {
			pr_notice("arcmsr%d: DMA allocation failed\n", acb->host->host_no);
			return false;
		}
		acb->dma_coherent_handle2 = dma_coherent_handle;
		acb->dma_coherent2 = dma_coherent;
		reg = (struct MessageUnit_D *)dma_coherent;
		acb->pmuD = reg;
		reg->chip_id = MEM_BASE0(ARCMSR_ARC1214_CHIP_ID);
		reg->cpu_mem_config = MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION);
		reg->i2o_host_interrupt_mask = MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK);
		reg->sample_at_reset = MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET);
		reg->reset_request = MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST);
		reg->host_int_status = MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS);
		reg->pcief0_int_enable = MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE);
		reg->inbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0);
		reg->inbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1);
		reg->outbound_msgaddr0 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0);
		reg->outbound_msgaddr1 = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1);
		reg->inbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL);
		reg->outbound_doorbell = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL);
		reg->outbound_doorbell_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE);
		reg->inboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW);
		reg->inboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH);
		reg->inboundlist_write_pointer = MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER);
		reg->outboundlist_base_low = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW);
		reg->outboundlist_base_high = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH);
		reg->outboundlist_copy_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER);
		reg->outboundlist_read_pointer = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER);
		reg->outboundlist_interrupt_cause = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE);
		reg->outboundlist_interrupt_enable = MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE);
		reg->message_wbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER);
		reg->message_rbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER);
		reg->msgcode_rwbuffer = MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER);
		break;
	}
	default:
		break;
	}
	return rtn;
}
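/*
 * The CCB pool is one large DMA-coherent allocation: each CCB is sized
 * for the firmware-reported S/G entry count (firmware config version
 * >= 3 encodes the maximum transfer length), rounded up to a 32-byte
 * boundary, times ARCMSR_MAX_FREECCB_NUM.
 */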
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	dma_addr_t cdb_phyaddr;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version;

	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;
	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	if ((firm_config_version & 0xFF) >= 3) {
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH <<
			((firm_config_version >> 8) & 0xFF)) * 1024; /* max 4M byte */
		max_sg_entrys = (max_xfer_len / 4096);
	}
	acb->host->max_sectors = max_xfer_len / 512;
	acb->host->sg_tablesize = max_sg_entrys;
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) +
		(max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size,
		&dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent) {
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n",
			acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	ccb_tmp = dma_coherent;
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A:
		case ACB_ADAPTER_TYPE_B:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
			break;
		case ACB_ADAPTER_TYPE_C:
		case ACB_ADAPTER_TYPE_D:
			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
			break;
		}
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		INIT_LIST_HEAD(&ccb_tmp->list);
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
	}
	return 0;
}
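/*
 * Bottom half for the "get config" message interrupt: re-read the
 * firmware's device map, diff it against the cached copy, and hot-add
 * or remove SCSI devices for bits that changed.
 */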
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
	struct AdapterControlBlock *acb = container_of(work,
		struct AdapterControlBlock, arcmsr_do_message_isr_bh);
	char *acb_dev_map = (char *)acb->device_map;
	uint32_t __iomem *signature = NULL;
	char __iomem *devicemap = NULL;
	int target, lun;
	struct scsi_device *psdev;
	char diff, temp;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		break;
	}
	}
	atomic_inc(&acb->rq_map_token);
	if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
		return;
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
		temp = readb(devicemap);
		diff = (*acb_dev_map) ^ temp;
		if (diff != 0) {
			*acb_dev_map = temp;
			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
				if ((diff & 0x01) == 1 && (temp & 0x01) == 1) {
					scsi_add_device(acb->host, 0, target, lun);
				} else if ((diff & 0x01) == 1 && (temp & 0x01) == 0) {
					psdev = scsi_device_lookup(acb->host, 0, target, lun);
					if (psdev != NULL) {
						scsi_remove_device(psdev);
						scsi_device_put(psdev);
					}
				}
				temp >>= 1;
				diff >>= 1;
			}
		}
		devicemap++;
		acb_dev_map++;
	}
}
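/*
 * Interrupt setup falls back in three stages: MSI-X with up to
 * ARCMST_NUM_MSIX_VECTORS vectors, then a single MSI vector, then the
 * shared legacy INTx line.
 */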
static int
arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
{
	int i, j, r;
	struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];

	for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
		entries[i].entry = i;
	r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
	if (r < 0)
		goto msi_int;
	acb->msix_vector_count = r;
	for (i = 0; i < r; i++) {
		if (request_irq(entries[i].vector,
				arcmsr_do_interrupt, 0, "arcmsr", acb)) {
			pr_warn("arcmsr%d: request_irq =%d failed!\n",
				acb->host->host_no, entries[i].vector);
			for (j = 0; j < i; j++)
				free_irq(entries[j].vector, acb);
			pci_disable_msix(pdev);
			goto msi_int;
		}
		acb->entries[i] = entries[i];
	}
	acb->acb_flags |= ACB_F_MSIX_ENABLED;
	pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
	return SUCCESS;
msi_int:
	if (pci_enable_msi_exact(pdev, 1) < 0)
		goto legacy_int;
	if (request_irq(pdev->irq, arcmsr_do_interrupt,
			IRQF_SHARED, "arcmsr", acb)) {
		pr_warn("arcmsr%d: request_irq =%d failed!\n",
			acb->host->host_no, pdev->irq);
		pci_disable_msi(pdev);
		goto legacy_int;
	}
	acb->acb_flags |= ACB_F_MSI_ENABLED;
	pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
	return SUCCESS;
legacy_int:
	if (request_irq(pdev->irq, arcmsr_do_interrupt,
			IRQF_SHARED, "arcmsr", acb)) {
		pr_warn("arcmsr%d: request_irq = %d failed!\n",
			acb->host->host_no, pdev->irq);
		return FAILED;
	}
	return SUCCESS;
}
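/*
 * PCI probe path. The error labels unwind strictly in reverse order of
 * the setup steps; out_free_sysfs is reached when
 * arcmsr_alloc_sysfs_attr() fails and additionally tears down the
 * timer, background rebuild and IRQs registered just before it.
 */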
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus, dev_fun;
	int error;

	error = pci_enable_device(pdev);
	if (error)
		return -ENODEV;
	host = scsi_host_alloc(&arcmsr_scsi_host_template,
		sizeof(struct AdapterControlBlock));
	if (!host)
		goto pci_disable_dev;
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (error) {
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (error) {
			printk(KERN_WARNING
				"scsi%d: No suitable DMA mask available\n",
				host->host_no);
			goto scsi_host_release;
		}
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb, 0, sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;	/* 16:8 */
	host->max_cmd_len = 16;	/* this is issue of 64bit LBA, over 2T byte */
	host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if (error)
		goto scsi_host_release;
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	spin_lock_init(&acb->postq_lock);
	spin_lock_init(&acb->doneq_lock);
	spin_lock_init(&acb->rqbuffer_lock);
	spin_lock_init(&acb->wqbuffer_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	acb->adapter_type = id->driver_data;
	error = arcmsr_remap_pciregion(acb);
	if (!error)
		goto pci_release_regs;
	error = arcmsr_alloc_io_queue(acb);
	if (!error)
		goto unmap_pci_region;
	error = arcmsr_get_firmware_spec(acb);
	if (!error)
		goto free_hbb_mu;
	error = arcmsr_alloc_ccb_pool(acb);
	if (error)
		goto free_hbb_mu;
	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto free_ccb_pool;
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto scsi_host_remove;
	arcmsr_iop_init(acb);
	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, 16);
	acb->fw_flag = FW_NORMAL;
	init_timer(&acb->eternal_timer);
	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	acb->eternal_timer.data = (unsigned long) acb;
	acb->eternal_timer.function = &arcmsr_request_device_map;
	add_timer(&acb->eternal_timer);
	if (arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	scsi_scan_host(host);
	return 0;
out_free_sysfs:
	del_timer_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_irq(pdev, acb);
scsi_host_remove:
	scsi_remove_host(host);
free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
free_hbb_mu:
	arcmsr_free_mu(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}
static void arcmsr_free_irq(struct pci_dev *pdev,
		struct AdapterControlBlock *acb)
{
	int i;

	if (acb->acb_flags & ACB_F_MSI_ENABLED) {
		free_irq(pdev->irq, acb);
		pci_disable_msi(pdev);
	} else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
		for (i = 0; i < acb->msix_vector_count; i++)
			free_irq(acb->entries[i].vector, acb);
		pci_disable_msix(pdev);
	} else
		free_irq(pdev->irq, acb);
}
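/*
 * Legacy PCI power-management hooks: suspend masks interrupts, stops
 * the background rebuild and flushes the adapter cache before cutting
 * power; resume restores the DMA mask, IRQs and the device-map timer.
 */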
static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
{
	uint32_t intmask_org;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	del_timer_sync(&acb->eternal_timer);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	pci_set_drvdata(pdev, host);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int arcmsr_resume(struct pci_dev *pdev)
{
	int error;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		pr_warn("%s: pci_enable_device error\n", __func__);
		return -ENODEV;
	}
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (error) {
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (error) {
			pr_warn("scsi%d: No suitable DMA mask available\n",
				host->host_no);
			goto controller_unregister;
		}
	}
	pci_set_master(pdev);
	if (arcmsr_request_irq(pdev, acb) == FAILED)
		goto controller_stop;
	arcmsr_iop_init(acb);
	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, 16);
	acb->fw_flag = FW_NORMAL;
	init_timer(&acb->eternal_timer);
	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	acb->eternal_timer.data = (unsigned long) acb;
	acb->eternal_timer.function = &arcmsr_request_device_map;
	add_timer(&acb->eternal_timer);
	return 0;
controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
controller_unregister:
	scsi_remove_host(host);
	arcmsr_free_ccb_pool(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
	return -ENODEV;
}
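/*
 * Ask the IOP to abort all outstanding commands. Each per-chip helper
 * posts an ABORT_CMD message and waits (up to ~20 seconds) for the
 * acknowledging doorbell.
 */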
static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			acb->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			acb->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			pACB->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'abort all outstanding "
			"command' timeout\n", pACB->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	uint8_t rtnval = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtnval = arcmsr_hbaA_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtnval = arcmsr_hbaB_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtnval = arcmsr_hbaC_abort_allcmd(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtnval = arcmsr_hbaD_abort_allcmd(acb);
		break;
	}
	return rtnval;
}
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	scsi_dma_unmap(pcmd);
}

static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;

	atomic_dec(&acb->ccboutstandingcount);
	arcmsr_pci_unmap_dma(ccb);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	pcmd->scsi_done(pcmd);
}

static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;

	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		int sense_data_length =
			sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
			? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
		memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}
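/*
 * Mask all outbound interrupt sources and return the previous mask so
 * arcmsr_enable_outbound_ints() can restore it; type D has no readable
 * mask at this point, so 0 is returned for it.
 */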
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
			&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/* disable all outbound interrupts */
		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
		writel(orig_mask | ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		/* disable all outbound interrupts */
		writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
		}
		break;
	}
	return orig_mask;
}
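/*
** The mask value returned above is meant to be kept by the caller and
** later handed back to arcmsr_enable_outbound_ints() to restore the
** original interrupt state, e.g. (illustrative only):
**
**	intmask_org = arcmsr_disable_outbound_ints(acb);
**	... touch the hardware with interrupts masked ...
**	arcmsr_enable_outbound_ints(acb, intmask_org);
**
** This is exactly the pattern arcmsr_iop_parking() below uses.
*/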
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;

	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	} else {
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT:
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		case ARCMSR_DEV_ABORTED:
		case ARCMSR_DEV_INIT_FAIL:
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			break;
		case ARCMSR_DEV_CHECK_CONDITION:
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			break;
		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr got command"
				" error done, but got unknown DeviceStatus = 0x%x\n",
				acb->host->host_no, id, lun,
				ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}
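/*
** devstate[id][lun] is a simple two-state map: a clean completion
** promotes a device previously marked ARECA_RAID_GONE back to
** ARECA_RAID_GOOD, while selection timeouts, aborts, init failures and
** unknown DeviceStatus values demote it to ARECA_RAID_GONE. A CHECK
** CONDITION keeps the device GOOD and forwards the sense data instead.
*/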
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	int id, lun;

	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				id = abortcmd->device->id;
				lun = abortcmd->device->lun;
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE
					"arcmsr%d: pCCB = '0x%p' isr got aborted command\n",
					acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE
			"arcmsr%d: isr got an illegal ccb command done"
			" acb = '0x%p' ccb = '0x%p' ccbacb = '0x%p'"
			" startdone = 0x%x ccboutstandingcount = %d\n",
			acb->host->host_no, acb, pCCB, pCCB->acb,
			pCCB->startdone,
			atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb, ccb_cdb_phy;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;

		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/* clear and abort all outbound posted Q */
		writel(outbound_intstatus, &reg->outbound_intstatus); /* clear interrupt */
		while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); /* frame must be 32 bytes aligned */
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* clear all outbound posted Q */
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			flag_ccb = reg->done_qbuffer[i];
			if (flag_ccb != 0) {
				reg->done_qbuffer[i] = 0;
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); /* frame must be 32 bytes aligned */
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/* need to do */
			flag_ccb = readl(&reg->outbound_queueport_low);
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); /* frame must be 32 bytes aligned */
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		uint32_t outbound_write_pointer;
		uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
		unsigned long flags;

		residual = atomic_read(&acb->ccboutstandingcount);
		for (i = 0; i < residual; i++) {
			spin_lock_irqsave(&acb->doneq_lock, flags);
			outbound_write_pointer =
				pmu->done_qbuffer[0].addressLow + 1;
			doneq_index = pmu->doneq_index;
			if ((doneq_index & 0xFFF) !=
				(outbound_write_pointer & 0xFFF)) {
				toggle = doneq_index & 0x4000;
				index_stripped = (doneq_index & 0xFFF) + 1;
				index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
				pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
					((toggle ^ 0x4000) + 1);
				doneq_index = pmu->doneq_index;
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				addressLow = pmu->done_qbuffer[doneq_index &
					0xFFF].addressLow;
				ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
				pARCMSR_CDB = (struct ARCMSR_CDB *)
					(acb->vir2phy_offset + ccb_cdb_phy);
				pCCB = container_of(pARCMSR_CDB,
					struct CommandControlBlock, arcmsr_cdb);
				error = (addressLow &
					ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
					true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
				writel(doneq_index,
					pmu->outboundlist_read_pointer);
			} else {
				spin_unlock_irqrestore(&acb->doneq_lock, flags);
				mdelay(10);
			}
		}
		pmu->postq_index = 0;
		pmu->doneq_index = 0x40FF;
		}
		break;
	}
}
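/*
** A note on the type D done queue drained above: doneq_index appears to
** pack a 12-bit ring index in its low bits plus a wrap toggle in bit
** 0x4000, so (doneq_index & 0xFFF) is what gets compared against the
** controller's write pointer, and the toggle flips each time the index
** wraps through ARCMSR_MAX_ARC1214_DONEQUEUE. The reply word itself
** carries the CCB frame's physical address in its upper bits
** (addressLow & 0xFFFFFFF0) and the error flag in
** ARCMSR_CCBREPLY_FLAG_ERROR_MODE1.
*/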
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb); /* FIXME: need spinlock */
		msleep(25);
	}
	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	arcmsr_free_irq(pdev, acb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_mu(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
}
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_free_irq(pdev, acb);
	flush_work(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

static int arcmsr_module_init(void)
{
	return pci_register_driver(&arcmsr_pci_driver);
}

static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}

module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
		u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		}
		break;
	}
}
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof(struct SG32ENTRY);
			arccdbsize += sizeof(struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length | cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof(struct SG64ENTRY);
			arccdbsize += sizeof(struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	arcmsr_cdb->msgPages = arccdbsize / 0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if (arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
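/*
** The CDB frame built above starts at a fixed 0x30-byte header and
** grows by one SG32ENTRY or one SG64ENTRY (flagged with IS_SG64_ADDR)
** per scatter element, chosen per element by whether the upper 32
** address bits are zero. msgPages is simply the frame size rounded up
** to 256-byte pages, and frames larger than 256 bytes are flagged
** ARCMSR_CDB_FLAG_SGL_BSIZE so the firmware fetches the bigger frame.
*/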
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;

	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE; /* if last index number, set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		if (acb->cdb_phyaddr_hi32) {
			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		} else {
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		u16 index_stripped;
		u16 postq_index, toggle;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;

		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
		pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
		pinbound_srb->length = ccb->arc_cdb_size >> 2;
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		toggle = postq_index & 0x4000;
		index_stripped = postq_index + 1;
		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
			(toggle ^ 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
		}
		break;
	}
}
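/*
** A note on the type C post above: ccb_post_stamp appears to pack the
** 32-byte-aligned frame address together with the frame length in
** 64-byte units minus one ((arc_cdb_size - 1) >> 6) and a low "valid"
** bit, so a single 32-bit write to inbound_queueport_low both posts
** the CCB and tells the firmware how much of the frame to fetch.
*/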
static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
			acb->host->host_no);
	}
}

static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
			acb->host->host_no);
	}
}

static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
			pACB->host->host_no);
	}
}

static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB))
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
}

static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_stop_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_stop_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_stop_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_stop_bgrb(acb);
		break;
	}
}
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size,
		acb->dma_coherent, acb->dma_coherent_handle);
}

static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		}
		break;
	}
}
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push the inbound doorbell to tell the IOP the driver data
		** write is done, then wait for the reply on the next
		** hwinterrupt before posting the next Qbuffer
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* same handshake as type A, via the drv2iop doorbell */
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/* same handshake as type A */
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
			reg->inbound_doorbell);
		}
		break;
	}
}
struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *qbuffer = NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	}
	return qbuffer;
}

static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *pqbuffer = NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	}
	return pqbuffer;
}
static uint32_t
arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
		struct QBUFFER __iomem *prbuffer)
{
	uint8_t *pQbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t iop_len, data_len, *buf2 = NULL;

	iop_data = (uint32_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	if (iop_len > 0) {
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		if (buf1 == NULL)
			return 0;
		data_len = iop_len;
		while (data_len >= 4) {
			*buf2++ = readl(iop_data);
			iop_data++;
			data_len -= 4;
		}
		if (data_len)
			*buf2 = readl(iop_data);
		buf2 = (uint32_t *)buf1;
	}
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = *buf1;
		acb->rqbuf_putIndex++;
		/* if last, index number set it to 0 */
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
		buf1++;
		iop_len--;
	}
	kfree(buf2);
	/* let IOP know data has been read */
	arcmsr_iop_message_read(acb);
	return 1;
}
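/*
** This DWORD variant exists, presumably, because the type C/D message
** units are accessed with 32-bit MMIO reads (readl) rather than byte
** reads, so the Qbuffer payload is staged through a small 128-byte
** bounce buffer (larger than the Qbuffer payload) and then copied
** byte-wise into the driver's ring. buf2 keeps the original allocation
** for kfree() while buf1 walks the bytes.
*/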
uint32_t
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
		struct QBUFFER __iomem *prbuffer)
{
	uint8_t *pQbuffer;
	uint8_t __iomem *iop_data;
	uint32_t iop_len;

	if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
		return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = readb(iop_data);
		acb->rqbuf_putIndex++;
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
		iop_data++;
		iop_len--;
	}
	arcmsr_iop_message_read(acb);
	return 1;
}
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;
	struct QBUFFER __iomem *prbuffer;
	int32_t buf_empty_len;

	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
		(ARCMSR_MAX_QBUFFER - 1);
	if (buf_empty_len >= readl(&prbuffer->data_len)) {
		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	} else
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
}
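/*
** The free-space test above is computed modulo the power-of-two ring
** size under rqbuffer_lock. When the firmware has more bytes pending
** than the driver-side ring can absorb, the read is deferred by
** setting ACB_F_IOPDATA_OVERFLOW; the pending data is fetched later
** (see arcmsr_iop_message_xfer() and
** arcmsr_clear_iop2drv_rqueue_buffer()) once room is available.
*/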
static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;

	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		if (buf1 == NULL)
			return;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint32_t __iomem *)pwbuffer->data;
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			*buf1 = *pQbuffer;
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			buf1++;
			allxfer_len++;
		}
		data_len = allxfer_len;
		buf1 = (uint8_t *)buf2;
		while (data_len >= 4) {
			data = *buf2++;
			writel(data, iop_data);
			iop_data++;
			data_len -= 4;
		}
		if (data_len) {
			data = *buf2;
			writel(data, iop_data);
		}
		writel(allxfer_len, &pwbuffer->data_len);
		kfree(buf1);
		arcmsr_iop_message_wrote(acb);
	}
}
void
arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;

	if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
		arcmsr_write_ioctldata2iop_in_DWORD(acb);
		return;
	}
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			writeb(*pQbuffer, iop_data);
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		writel(allxfer_len, &pwbuffer->data_len);
		arcmsr_iop_message_wrote(acb);
	}
}
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;

	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
		arcmsr_write_ioctldata2iop(acb);
	if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
}
static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
		| ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
}

static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	/*
	*******************************************************************
	** Maybe we need to check here whether wrqbuffer_lock is held.
	** DOORBELL: ding! dong!
	** Check if there is any mail that needs to be picked up from
	** the firmware.
	*******************************************************************
	*/
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		readl(&reg->outbound_doorbell_clear);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaC_message_isr(pACB);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
}

static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_D *pmu = pACB->pmuD;

	outbound_doorbell = readl(pmu->outbound_doorbell);
	do {
		writel(outbound_doorbell, pmu->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaD_message_isr(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		outbound_doorbell = readl(pmu->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
		| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); /* frame must be 32 bytes aligned */
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}

static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;

	index = reg->doneq_index;
	while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
		reg->done_qbuffer[index] = 0;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5)); /* frame must be 32 bytes aligned */
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}

static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
	int error;

	phbcmu = acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */
	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
			0xFFFFFFFF) {
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
			+ ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		throttling++;
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
				&phbcmu->inbound_doorbell);
			throttling = 0;
		}
	}
}
static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
{
	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
	uint32_t addressLow, ccb_cdb_phy;
	int error;
	struct MessageUnit_D *pmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	pmu = acb->pmuD;
	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
	doneq_index = pmu->doneq_index;
	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
		do {
			toggle = doneq_index & 0x4000;
			index_stripped = (doneq_index & 0xFFF) + 1;
			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
				((toggle ^ 0x4000) + 1);
			doneq_index = pmu->doneq_index;
			addressLow = pmu->done_qbuffer[doneq_index &
				0xFFF].addressLow;
			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
				+ ccb_cdb_phy);
			ccb = container_of(arcmsr_cdb,
				struct CommandControlBlock, arcmsr_cdb);
			error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
				? true : false;
			arcmsr_drain_donequeue(acb, ccb, error);
			writel(doneq_index, pmu->outboundlist_read_pointer);
		} while ((doneq_index & 0xFFF) !=
			(outbound_write_pointer & 0xFFF));
	}
	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
		pmu->outboundlist_interrupt_cause);
	readl(pmu->outboundlist_interrupt_cause);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
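/*
** The readl() of outboundlist_interrupt_cause immediately after the
** clear write above flushes the posted PCI write, so the interrupt
** cause is guaranteed to be cleared on the controller before
** doneq_lock is released and another interrupt can be taken.
*/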
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the
** current adapter config.
** We want this in order to compare the drivemap so that we can detect
** newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	/* clear interrupt and message state */
	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	/* clear interrupt and message state */
	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the
** current adapter config.
** We want this in order to compare the drivemap so that we can detect
** newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;

	/* clear interrupt and message state */
	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
	readl(reg->outbound_doorbell);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
		return IRQ_NONE;
	do {
		writel(outbound_intstatus, &reg->outbound_intstatus);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
			arcmsr_hbaA_doorbell_isr(acb);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
			arcmsr_hbaA_postqueue_isr(acb);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
			arcmsr_hbaA_message_isr(acb);
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
	} while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
		| ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
		| ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
	return IRQ_HANDLED;
}

static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;

	outbound_doorbell = readl(reg->iop2drv_doorbell) &
		acb->outbound_int_enable;
	if (!outbound_doorbell)
		return IRQ_NONE;
	do {
		writel(~outbound_doorbell, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
			arcmsr_hbaB_postqueue_isr(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaB_message_isr(acb);
		outbound_doorbell = readl(reg->iop2drv_doorbell) &
			acb->outbound_int_enable;
	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_IOP2DRV_DATA_READ_OK
		| ARCMSR_IOP2DRV_CDB_DONE
		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
	return IRQ_HANDLED;
}
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	/*
	*********************************************
	** check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaC_doorbell_isr(pACB);
		/* MU post queue interrupts */
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaC_postqueue_isr(pACB);
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}

static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts */
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return arcmsr_hbaA_handle_isr(acb);
	case ACB_ADAPTER_TYPE_B:
		return arcmsr_hbaB_handle_isr(acb);
	case ACB_ADAPTER_TYPE_C:
		return arcmsr_hbaC_handle_isr(acb);
	case ACB_ADAPTER_TYPE_D:
		return arcmsr_hbaD_handle_isr(acb);
	default:
		return IRQ_NONE;
	}
}
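/*
** Every per-type handler dispatched above returns IRQ_NONE when its
** status register shows nothing pending, so arcmsr_interrupt() behaves
** correctly on shared interrupt lines: IRQ_HANDLED is only claimed
** after at least one doorbell, post queue or message source was
** actually serviced.
*/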
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;

			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
}
void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
{
	uint32_t i;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		for (i = 0; i < 15; i++) {
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				arcmsr_iop_message_read(acb);
				mdelay(30);
			} else if (acb->rqbuf_getIndex !=
				acb->rqbuf_putIndex) {
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				mdelay(30);
			} else
				break;
		}
	}
}
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;
			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			struct QBUFFER __iomem *prbuffer;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;
		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error, report sense data */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			if (user_len > cnt2end) {
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		uint8_t *pQbuffer = acb->wqbuffer;

		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		uint8_t *pQbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";

		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
message_out:
	if (use_sg) {
		struct scatterlist *sg = scsi_sglist(cmd);

		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}
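/*
** The READ_RQBUFFER path above drains the rqbuffer ring with the
** kernel's circ_buf helpers: CIRC_CNT() gives the total bytes
** available and CIRC_CNT_TO_END() the contiguous run before the ring
** wraps, so at most two memcpy() calls move the data out before
** rqbuf_getIndex is advanced modulo ARCMSR_MAX_QBUFFER.
*/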
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head = &acb->ccb_free_list;
	struct CommandControlBlock *ccb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&acb->ccblist_lock, flags);
	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del_init(&ccb->list);
	} else {
		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	return ccb;
}
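/*
** A NULL return here simply means the free list is empty; the
** queuecommand path below translates that into SCSI_MLQUEUE_HOST_BUSY
** so the midlayer requeues the command instead of failing it.
*/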
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		strncpy(&inqdata[8], "Areca ", 8);
		/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;
		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
		cmd->scsi_done(cmd);
		}
		break;
	case WRITE_BUFFER:
	case READ_BUFFER:
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
		break;
	default:
		cmd->scsi_done(cmd);
	}
}
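/*
** SCSI target 16 acts as the driver's virtual device for in-band
** management: INQUIRY is answered from the synthesized TYPE_PROCESSOR
** descriptor above, and WRITE_BUFFER/READ_BUFFER are routed into
** arcmsr_iop_message_xfer(), so user-space tools can talk to the
** firmware through ordinary SCSI commands (see the target == 16 check
** in arcmsr_queue_command_lck() below).
*/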
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static DEF_SCSI_QCMD(arcmsr_queue_command)
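/*
 * "Get config" layout note for the four arcmsr_hbaX_get_config()
 * variants below: the reply arrives in the message unit's rwbuffer as
 * 32-bit words, and all adapter types share the offsets repeated in the
 * per-field comments - word 0 signature, word 1 request length, word 2
 * queue depth, word 3 SDRAM size, word 4 channel count, words 15-16
 * model string, words 17-20 firmware version, words 21-24 device map,
 * word 25 firmware config version.
 */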
static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
	char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
	char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
	int count;

	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		acb->host->host_no,
		acb->firm_model,
		acb->firm_version);
	acb->signature = readl(&reg->message_rwbuffer[0]);
	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /* firm_cfg_version,25,100-103 */
	return true;
}
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model;
	char __iomem *iop_firm_version;
	char __iomem *iop_device_map;
	int count;

	iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);	/* firm_model,15,60-67 */
	iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);	/* firm_version,17,68-83 */
	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);	/* device_map,21,84-99 */
	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		acb->host->host_no,
		acb->firm_model,
		acb->firm_version);
	acb->signature = readl(&reg->message_rwbuffer[0]);
	/* firm_signature,1,00-03 */
	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
	/* firm_request_len,1,04-07 */
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
	/* firm_numbers_queue,2,08-11 */
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
	/* firm_sdram_size,3,12-15 */
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
	/* firm_hd_channels,4,16-19 */
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);
	/* firm_cfg_version,25,100-103 */
	return true;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org, Index, firmware_state = 0;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	char *acb_firm_model = pACB->firm_model;
	char *acb_firm_version = pACB->firm_version;
	char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);	/* firm_model,15,60-67 */
	char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);	/* firm_version,17,68-83 */
	int count;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	do {
		firmware_state = readl(&reg->outbound_msgaddr1);
	} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	for (Index = 0; Index < 2000; Index++) {
		if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&reg->outbound_doorbell_clear); /* clear interrupt */
			break;
		}
		udelay(10);
	} /* max 2000 * 10us = 20 ms */
	if (Index >= 2000) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", pACB->host->host_no);
		return false;
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		pACB->host->host_no,
		pACB->firm_model,
		pACB->firm_version);
	pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);	/* firm_request_len,1,04-07 */
	pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);	/* firm_numbers_queue,2,08-11 */
	pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);	/* firm_sdram_size,3,12-15 */
	pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);	/* firm_hd_channels,4,16-19 */
	pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);	/* firm_cfg_version,25,100-103 */
	/* all interrupt service will be enabled at arcmsr_iop_init */
	return true;
}
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model;
	char __iomem *iop_firm_version;
	char __iomem *iop_device_map;
	u32 count;
	struct MessageUnit_D *reg = acb->pmuD;

	iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
	iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
	iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
	if (readl(reg->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			reg->outbound_doorbell); /* clear interrupt */
	}
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	acb->signature = readl(&reg->msgcode_rwbuffer[0]);
	/* firm_signature,1,00-03 */
	acb->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);
	/* firm_request_len,1,04-07 */
	acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);
	/* firm_numbers_queue,2,08-11 */
	acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);
	/* firm_sdram_size,3,12-15 */
	acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);
	/* firm_hd_channels,4,16-19 */
	acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		acb->host->host_no,
		acb->firm_model,
		acb->firm_version);
	return true;
}
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	bool rtn = false;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_get_config(acb);
		break;
	default:
		break;
	}
	if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
	else
		acb->maxOutstanding = acb->firm_numbers_queue - 1;
	acb->host->can_queue = acb->maxOutstanding;
	return rtn;
}
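/*
 * The arcmsr_hbaX_polling_ccbdone() helpers below drain the adapter's
 * done queue by polling rather than by interrupt.  They are used from
 * the abort and bus-reset paths while outbound interrupts are masked;
 * the caller pattern (see arcmsr_abort() further down) is roughly:
 *
 *	intmask_org = arcmsr_disable_outbound_ints(acb);
 *	ccb->startdone = ARCMSR_CCB_ABORTED;
 *	rtn = arcmsr_polling_ccbdone(acb, ccb);
 *	arcmsr_enable_outbound_ints(acb, intmask_org);
 */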
static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus); /* clear interrupt */
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully\n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
				" command done ccb = '0x%p'"
				" ccboutstandingcount = %d\n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while (1) {
		index = reg->doneq_index;
		flag_ccb = reg->done_qbuffer[index];
		if (flag_ccb == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		reg->done_qbuffer[index] = 0;
		index++;
		/* if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully\n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
				" command done ccb = '0x%p'"
				" ccboutstandingcount = %d\n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	uint32_t flag_ccb, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;

polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy); /* frame must be 32 bytes aligned */
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check if command done with no error */
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
				" command done ccb = '0x%p'"
				" ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	bool error;
	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
	unsigned long flags;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_D *pmu = acb->pmuD;

polling_hbaD_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 40) {
					rtn = FAILED;
					break;
				}
				goto polling_hbaD_ccb_retry;
			}
		}
		toggle = doneq_index & 0x4000;
		index_stripped = (doneq_index & 0xFFF) + 1;
		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
			((toggle ^ 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		if ((pCCB->acb != acb) ||
			(pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, (u32)pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling got an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	int rtn = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
		break;
	}
	return rtn;
}
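/*
 * arcmsr_iop_confirm() hands the firmware the DMA location of the CCB
 * pool: types A and C only need the upper 32 bits (and only when
 * non-zero), while types B and D also program the post/done queue
 * window.  Each branch repeats the same rwbuffer handshake, sketched
 * as:
 *
 *	writel(ARCMSR_SIGNATURE_SET_CONFIG, &rwbuffer[0]);
 *	writel(cdb_phyaddr_hi32, &rwbuffer[1]);
 *	... type-specific queue setup, "set config" doorbell,
 *	    then wait for the message interrupt ...
 */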
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;

	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
		dma_coherent_handle = acb->dma_coherent_handle2;
		break;
	default:
		dma_coherent_handle = acb->dma_coherent_handle;
		break;
	}
	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	/*
	***********************************************************************
	** if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;

			writel(ARCMSR_SIGNATURE_SET_CONFIG,
				&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG,
				&reg->inbound_msgaddr0);
			if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set ccb high "
					"part physical address' timeout\n",
					acb->host->host_no);
				return 1;
			}
		}
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = acb->pmuB;

		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n",
				acb->host->host_no);
			return 1;
		}
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4 */
		writel(cdb_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4 */
		writel(cdb_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4] */
		writel(1056, rwbuffer);
		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
				"timeout\n", acb->host->host_no);
			return 1;
		}
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			pr_err("arcmsr%d: can't set driver mode.\n",
				acb->host->host_no);
			return 1;
		}
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_C __iomem *reg = acb->pmuC;

			printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
				acb->adapter_index, cdb_phyaddr_hi32);
			writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
					"timeout\n", acb->host->host_no);
				return 1;
			}
		}
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = acb->pmuD;

		reg->postq_index = 0;
		reg->doneq_index = 0;
		rwbuffer = reg->msgcode_rwbuffer;
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		writel(cdb_phyaddr_hi32, rwbuffer++);
		writel(cdb_phyaddr, rwbuffer++);
		writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
			sizeof(struct InBound_SRB)), rwbuffer++);
		writel(0x100, rwbuffer);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
		if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
	}
	break;
	}
	return 0;
}
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		do {
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		do {
			firmware_state = readl(reg->outbound_msgaddr1);
		} while ((firmware_state &
			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
	}
	break;
	}
}
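/*
 * Device-map polling note: acb->eternal_timer periodically lands in one
 * of the arcmsr_hbaX_request_device_map() helpers below.  rq_map_token
 * is a countdown that is recharged to 16 once it stops moving
 * (ante_token_value catches up with it); while a bus reset or abort is
 * in flight the helpers only re-arm the timer and post no "get config"
 * message.
 */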
static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
		((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
		((acb->acb_flags & ACB_F_ABORT) != 0)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token))
			atomic_set(&acb->rq_map_token, 16);
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}

static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
		((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
		((acb->acb_flags & ACB_F_ABORT) != 0)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token))
			atomic_set(&acb->rq_map_token, 16);
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}

static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;

	if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
		((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
		((acb->acb_flags & ACB_F_ABORT) != 0)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token))
			atomic_set(&acb->rq_map_token, 16);
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}

static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
		((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
		((acb->acb_flags & ACB_F_ABORT) != 0)) {
		mod_timer(&acb->eternal_timer,
			jiffies + msecs_to_jiffies(6 * HZ));
	} else {
		acb->fw_flag = FW_NORMAL;
		if (atomic_read(&acb->ante_token_value) ==
			atomic_read(&acb->rq_map_token))
			atomic_set(&acb->rq_map_token, 16);
		atomic_set(&acb->ante_token_value,
			atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies +
				msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
			reg->inbound_msgaddr0);
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
	}
}

static void arcmsr_request_device_map(unsigned long pacb)
{
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_request_device_map(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_request_device_map(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_request_device_map(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_request_device_map(acb);
		break;
	}
}
static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}

static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}

static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", pACB->host->host_no);
	}
}

static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *pmu = pACB->pmuD;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout\n", pACB->host->host_no);
	}
}

static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_start_bgrb(acb);
		break;
	}
}
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;

		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/* clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
	}
	break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		/* clear interrupt and message state */
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let IOP know data has been read */
	}
	break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		uint32_t outbound_doorbell, i;

		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(&reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					&reg->outbound_doorbell_clear);
				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
					&reg->inbound_doorbell);
			} else
				break;
		}
	}
	break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		uint32_t outbound_doorbell, i;

		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(reg->outbound_doorbell);
		writel(outbound_doorbell, reg->outbound_doorbell);
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					reg->outbound_doorbell);
				writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
					reg->inbound_doorbell);
			} else
				break;
		}
	}
	break;
	}
}
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT\n");
			return;
		}
	}
	break;
	case ACB_ADAPTER_TYPE_C:
		return;
	}
}
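/*
 * Hard reset is device-specific: the ARC1680 and ARC1214 expose a
 * dedicated reset register, the ARC1880 first needs the write-enable
 * key sequence on write_sequence before host_diagnostic accepts the
 * reset bit, and every other model is kicked through PCI config space
 * offset 0x84.  The first 64 bytes of PCI config space are saved before
 * the reset and restored afterwards, since the reset wipes them.
 */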
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n",
		acb->host->host_no);
	for (i = 0; i < 64; i++)
		pci_read_config_byte(acb->pdev, i, &value[i]);
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		do {
			count++;
			/* unlock the diagnostic register write protect */
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) &
			ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1214) {
		writel(0x20, pmuD->reset_request);
	} else {
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++)
		pci_write_config_byte(acb->pdev, i, value[i]);
	msleep(1000);
}
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/* start background rebuild */
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue, outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i = 0;
	unsigned long flags;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				ccb->ccb_flags = 0;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
		return rtnval;
	}
	return rtnval;
}
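/*
 * Bus-reset error handler.  For types A, C and D the flow is the same:
 * if another reset is already in progress, wait up to 220 seconds for
 * it to finish; otherwise mark ACB_F_BUS_RESET and try the soft
 * arcmsr_iop_reset() first.  When that path does not succeed, fall back
 * to arcmsr_hardware_reset() plus a bounded ssleep()/retry wait on the
 * firmware-ready indication, then rerun the init sequence
 * (get_firmware_spec, start_bgrb, doorbell clearing, interrupt
 * re-enable) before declaring SUCCESS.
 */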
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	uint32_t intmask_org, outbound_doorbell;
	int retry_count = 0;
	int rtn = FAILED;

	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d\n",
		acb->num_resets, acb->num_aborts);
	acb->num_resets++;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (acb->acb_flags & ACB_F_BUS_RESET) {
			long timeout;

			printk(KERN_ERR "arcmsr: a bus reset eh is already proceeding.......\n");
			timeout = wait_event_timeout(wait_q, (acb->acb_flags
				& ACB_F_BUS_RESET) == 0, 220 * HZ);
			if (timeout)
				return SUCCESS;
		}
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			struct MessageUnit_A __iomem *reg;

			reg = acb->pmuA;
			arcmsr_hardware_reset(acb);
			acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep_again:
			ssleep(ARCMSR_SLEEPTIME);
			if ((readl(&reg->outbound_msgaddr1) &
				ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n",
					acb->host->host_no, retry_count);
				if (retry_count > ARCMSR_RETRYCOUNT) {
					acb->fw_flag = FW_DEADLOCK;
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n",
						acb->host->host_no);
					return FAILED;
				}
				retry_count++;
				goto sleep_again;
			}
			acb->acb_flags |= ACB_F_IOP_INITED;
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb);
			arcmsr_start_adapter_bgrb(acb);
			/* clear Qbuffer if door bell ringed */
			outbound_doorbell = readl(&reg->outbound_doorbell);
			writel(outbound_doorbell, &reg->outbound_doorbell); /* clear interrupt */
			writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
			/* enable outbound Post Queue, outbound doorbell Interrupt */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = SUCCESS;
			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = FAILED;
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		if (acb->acb_flags & ACB_F_BUS_RESET) {
			long timeout;

			printk(KERN_ERR "arcmsr: a bus reset eh is already proceeding.......\n");
			timeout = wait_event_timeout(wait_q, (acb->acb_flags
				& ACB_F_BUS_RESET) == 0, 220 * HZ);
			if (timeout)
				return SUCCESS;
		}
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			struct MessageUnit_C __iomem *reg;

			reg = acb->pmuC;
			arcmsr_hardware_reset(acb);
			acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep:
			ssleep(ARCMSR_SLEEPTIME);
			if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n",
					acb->host->host_no, retry_count);
				if (retry_count > ARCMSR_RETRYCOUNT) {
					acb->fw_flag = FW_DEADLOCK;
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n",
						acb->host->host_no);
					return FAILED;
				}
				retry_count++;
				goto sleep;
			}
			acb->acb_flags |= ACB_F_IOP_INITED;
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb);
			arcmsr_start_adapter_bgrb(acb);
			/* clear Qbuffer if door bell ringed */
			arcmsr_clear_doorbell_queue_buffer(acb);
			/* enable outbound Post Queue, outbound doorbell Interrupt */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = SUCCESS;
			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		if (acb->acb_flags & ACB_F_BUS_RESET) {
			long timeout;

			pr_notice("arcmsr: a bus reset eh is already proceeding.......\n");
			timeout = wait_event_timeout(wait_q, (acb->acb_flags
				& ACB_F_BUS_RESET) == 0, 220 * HZ);
			if (timeout)
				return SUCCESS;
		}
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			struct MessageUnit_D *reg;

			reg = acb->pmuD;
			arcmsr_hardware_reset(acb);
			acb->acb_flags &= ~ACB_F_IOP_INITED;
nap:
			ssleep(ARCMSR_SLEEPTIME);
			if ((readl(reg->sample_at_reset) & 0x80) != 0) {
				pr_err("arcmsr%d: waiting for "
					"hw bus reset return, retry=%d\n",
					acb->host->host_no, retry_count);
				if (retry_count > ARCMSR_RETRYCOUNT) {
					acb->fw_flag = FW_DEADLOCK;
					pr_err("arcmsr%d: waiting for hw bus"
						" reset return, "
						"RETRY TERMINATED!!\n",
						acb->host->host_no);
					return FAILED;
				}
				retry_count++;
				goto nap;
			}
			acb->acb_flags |= ACB_F_IOP_INITED;
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb);
			arcmsr_start_adapter_bgrb(acb);
			arcmsr_clear_doorbell_queue_buffer(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer,
				jiffies + msecs_to_jiffies(6 * HZ));
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = SUCCESS;
			pr_err("arcmsr: scsi bus reset "
				"eh returns with success\n");
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer,
				jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	}
	return rtn;
}
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb)
{
	return arcmsr_polling_ccbdone(acb, ccb);
}
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;
	int rtn = FAILED;
	uint32_t intmask_org;

	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d\n",
		acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
	acb->acb_flags |= ACB_F_ABORT;
	acb->num_aborts++;
	/*
	************************************************
	** all interrupt service routines are disabled here;
	** we must handle the abort as soon as possible and exit
	************************************************
	*/
	if (!atomic_read(&acb->ccboutstandingcount)) {
		acb->acb_flags &= ~ACB_F_ABORT;
		return rtn;
	}
	intmask_org = arcmsr_disable_outbound_ints(acb);
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];

		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			ccb->startdone = ARCMSR_CCB_ABORTED;
			rtn = arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}
	acb->acb_flags &= ~ACB_F_ABORT;
	arcmsr_enable_outbound_ints(acb, intmask_org);
	return rtn;
}
static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;

	switch (acb->pdev->device) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1200:
	case PCI_DEVICE_ID_ARECA_1202:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1201:
	case PCI_DEVICE_ID_ARECA_1203:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1214:
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
	case PCI_DEVICE_ID_ARECA_1880:
		type = "SAS/SATA";
		break;
	default:
		type = "unknown";
		raid6 = 0;
		break;
	}
	sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
		type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
	return buf;
}