/* arcmsr_hba.c */
  1. /*
  2. *******************************************************************************
  3. ** O.S : Linux
  4. ** FILE NAME : arcmsr_hba.c
  5. ** BY : Nick Cheng
  6. ** Description: SCSI RAID Device Driver for
  7. ** ARECA RAID Host adapter
  8. *******************************************************************************
  9. ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
  10. **
  11. ** Web site: www.areca.com.tw
  12. ** E-mail: support@areca.com.tw
  13. **
  14. ** This program is free software; you can redistribute it and/or modify
  15. ** it under the terms of the GNU General Public License version 2 as
  16. ** published by the Free Software Foundation.
  17. ** This program is distributed in the hope that it will be useful,
  18. ** but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. ** GNU General Public License for more details.
  21. *******************************************************************************
  22. ** Redistribution and use in source and binary forms, with or without
  23. ** modification, are permitted provided that the following conditions
  24. ** are met:
  25. ** 1. Redistributions of source code must retain the above copyright
  26. ** notice, this list of conditions and the following disclaimer.
  27. ** 2. Redistributions in binary form must reproduce the above copyright
  28. ** notice, this list of conditions and the following disclaimer in the
  29. ** documentation and/or other materials provided with the distribution.
  30. ** 3. The name of the author may not be used to endorse or promote products
  31. ** derived from this software without specific prior written permission.
  32. **
  33. ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  34. ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  35. ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  36. ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  37. ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
  38. ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  39. ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
  40. ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  41. ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
  42. ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  43. *******************************************************************************
  44. ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
  45. ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
  46. *******************************************************************************
  47. */
  48. #include <linux/module.h>
  49. #include <linux/reboot.h>
  50. #include <linux/spinlock.h>
  51. #include <linux/pci_ids.h>
  52. #include <linux/interrupt.h>
  53. #include <linux/moduleparam.h>
  54. #include <linux/errno.h>
  55. #include <linux/types.h>
  56. #include <linux/delay.h>
  57. #include <linux/dma-mapping.h>
  58. #include <linux/timer.h>
  59. #include <linux/slab.h>
  60. #include <linux/pci.h>
  61. #include <linux/aer.h>
  62. #include <asm/dma.h>
  63. #include <asm/io.h>
  64. #include <asm/uaccess.h>
  65. #include <scsi/scsi_host.h>
  66. #include <scsi/scsi.h>
  67. #include <scsi/scsi_cmnd.h>
  68. #include <scsi/scsi_tcq.h>
  69. #include <scsi/scsi_device.h>
  70. #include <scsi/scsi_transport.h>
  71. #include <scsi/scsicam.h>
  72. #include "arcmsr.h"
  73. MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
  74. MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
  75. MODULE_LICENSE("Dual BSD/GPL");
  76. MODULE_VERSION(ARCMSR_DRIVER_VERSION);
  77. #define ARCMSR_SLEEPTIME 10
  78. #define ARCMSR_RETRYCOUNT 12
  79. wait_queue_head_t wait_q;
  80. static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
  81. struct scsi_cmnd *cmd);
  82. static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
  83. static int arcmsr_abort(struct scsi_cmnd *);
  84. static int arcmsr_bus_reset(struct scsi_cmnd *);
  85. static int arcmsr_bios_param(struct scsi_device *sdev,
  86. struct block_device *bdev, sector_t capacity, int *info);
  87. static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
  88. static int arcmsr_probe(struct pci_dev *pdev,
  89. const struct pci_device_id *id);
  90. static void arcmsr_remove(struct pci_dev *pdev);
  91. static void arcmsr_shutdown(struct pci_dev *pdev);
  92. static void arcmsr_iop_init(struct AdapterControlBlock *acb);
  93. static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
  94. static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
  95. static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
  96. static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
  97. static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
  98. static void arcmsr_request_device_map(unsigned long pacb);
  99. static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
  100. static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
  101. static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
  102. static void arcmsr_message_isr_bh_fn(struct work_struct *work);
  103. static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
  104. static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
  105. static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
  106. static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
  107. static const char *arcmsr_info(struct Scsi_Host *);
  108. static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
  109. static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
  110. int queue_depth, int reason)
  111. {
  112. if (reason != SCSI_QDEPTH_DEFAULT)
  113. return -EOPNOTSUPP;
  114. if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
  115. queue_depth = ARCMSR_MAX_CMD_PERLUN;
  116. scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
  117. return queue_depth;
  118. }
/*
 * SCSI mid-layer host template: advertises the driver's entry points
 * and capacity limits.  max_sectors/sg_tablesize set here are defaults;
 * arcmsr_alloc_ccb_pool() raises them per-host when the firmware
 * reports support for larger transfers.
 */
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "ARCMSR ARECA SATA/SAS RAID Controller"
				  ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_FREECCB_NUM,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
};
/* PCI IDs of all supported Areca controllers (ARC11xx/12xx/13xx/16xx/1880). */
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
	{0, 0},	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
/* PCI driver registration: binds the probe/remove/shutdown callbacks
 * to the device-ID table above. */
static struct pci_driver arcmsr_pci_driver = {
	.name		= "arcmsr",
	.id_table	= arcmsr_device_id_table,
	.probe		= arcmsr_probe,
	.remove		= arcmsr_remove,
	.shutdown	= arcmsr_shutdown,
};
  167. /*
  168. ****************************************************************************
  169. ****************************************************************************
  170. */
  171. static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
  172. {
  173. switch (acb->adapter_type) {
  174. case ACB_ADAPTER_TYPE_A:
  175. case ACB_ADAPTER_TYPE_C:
  176. break;
  177. case ACB_ADAPTER_TYPE_B:{
  178. dma_free_coherent(&acb->pdev->dev,
  179. sizeof(struct MessageUnit_B),
  180. acb->pmuB, acb->dma_coherent_handle_hbb_mu);
  181. }
  182. }
  183. }
/*
 * Map the controller's MMIO register region(s) into kernel virtual
 * address space.  Which BAR(s) are used depends on the adapter family:
 *   type A: BAR 0 -> acb->pmuA
 *   type B: BAR 0 -> acb->mem_base0, BAR 2 -> acb->mem_base1
 *   type C: BAR 1 -> acb->pmuC (mapped non-cached)
 * Returns true on success, false if any mapping failed; for type B a
 * mapping made before the failure is unwound first.
 */
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			/* undo the first mapping before bailing out */
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		/* a message-done doorbell may already be pending; ack it now */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	}
	return true;
}
  228. static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
  229. {
  230. switch (acb->adapter_type) {
  231. case ACB_ADAPTER_TYPE_A:{
  232. iounmap(acb->pmuA);
  233. }
  234. break;
  235. case ACB_ADAPTER_TYPE_B:{
  236. iounmap(acb->mem_base0);
  237. iounmap(acb->mem_base1);
  238. }
  239. break;
  240. case ACB_ADAPTER_TYPE_C:{
  241. iounmap(acb->pmuC);
  242. }
  243. }
  244. }
  245. static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
  246. {
  247. irqreturn_t handle_state;
  248. struct AdapterControlBlock *acb = dev_id;
  249. handle_state = arcmsr_interrupt(acb);
  250. return handle_state;
  251. }
  252. static int arcmsr_bios_param(struct scsi_device *sdev,
  253. struct block_device *bdev, sector_t capacity, int *geom)
  254. {
  255. int ret, heads, sectors, cylinders, total_capacity;
  256. unsigned char *buffer;/* return copy of block device's partition table */
  257. buffer = scsi_bios_ptable(bdev);
  258. if (buffer) {
  259. ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
  260. kfree(buffer);
  261. if (ret != -1)
  262. return ret;
  263. }
  264. total_capacity = capacity;
  265. heads = 64;
  266. sectors = 32;
  267. cylinders = total_capacity / (heads * sectors);
  268. if (cylinders > 1024) {
  269. heads = 255;
  270. sectors = 63;
  271. cylinders = total_capacity / (heads * sectors);
  272. }
  273. geom[0] = heads;
  274. geom[1] = sectors;
  275. geom[2] = cylinders;
  276. return 0;
  277. }
  278. static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
  279. {
  280. struct pci_dev *pdev = acb->pdev;
  281. u16 dev_id;
  282. pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
  283. acb->dev_id = dev_id;
  284. switch (dev_id) {
  285. case 0x1880: {
  286. acb->adapter_type = ACB_ADAPTER_TYPE_C;
  287. }
  288. break;
  289. case 0x1201: {
  290. acb->adapter_type = ACB_ADAPTER_TYPE_B;
  291. }
  292. break;
  293. default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
  294. }
  295. }
  296. static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
  297. {
  298. struct MessageUnit_A __iomem *reg = acb->pmuA;
  299. int i;
  300. for (i = 0; i < 2000; i++) {
  301. if (readl(&reg->outbound_intstatus) &
  302. ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
  303. writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
  304. &reg->outbound_intstatus);
  305. return true;
  306. }
  307. msleep(10);
  308. } /* max 20 seconds */
  309. return false;
  310. }
  311. static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
  312. {
  313. struct MessageUnit_B *reg = acb->pmuB;
  314. int i;
  315. for (i = 0; i < 2000; i++) {
  316. if (readl(reg->iop2drv_doorbell)
  317. & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
  318. writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
  319. reg->iop2drv_doorbell);
  320. writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
  321. reg->drv2iop_doorbell);
  322. return true;
  323. }
  324. msleep(10);
  325. } /* max 20 seconds */
  326. return false;
  327. }
  328. static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
  329. {
  330. struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
  331. int i;
  332. for (i = 0; i < 2000; i++) {
  333. if (readl(&phbcmu->outbound_doorbell)
  334. & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
  335. writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
  336. &phbcmu->outbound_doorbell_clear); /*clear interrupt*/
  337. return true;
  338. }
  339. msleep(10);
  340. } /* max 20 seconds */
  341. return false;
  342. }
  343. static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
  344. {
  345. struct MessageUnit_A __iomem *reg = acb->pmuA;
  346. int retry_count = 30;
  347. writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
  348. do {
  349. if (arcmsr_hba_wait_msgint_ready(acb))
  350. break;
  351. else {
  352. retry_count--;
  353. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  354. timeout, retry count down = %d \n", acb->host->host_no, retry_count);
  355. }
  356. } while (retry_count != 0);
  357. }
  358. static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
  359. {
  360. struct MessageUnit_B *reg = acb->pmuB;
  361. int retry_count = 30;
  362. writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
  363. do {
  364. if (arcmsr_hbb_wait_msgint_ready(acb))
  365. break;
  366. else {
  367. retry_count--;
  368. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  369. timeout,retry count down = %d \n", acb->host->host_no, retry_count);
  370. }
  371. } while (retry_count != 0);
  372. }
  373. static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
  374. {
  375. struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
  376. int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
  377. writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
  378. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  379. do {
  380. if (arcmsr_hbc_wait_msgint_ready(pACB)) {
  381. break;
  382. } else {
  383. retry_count--;
  384. printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
  385. timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
  386. }
  387. } while (retry_count != 0);
  388. return;
  389. }
  390. static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
  391. {
  392. switch (acb->adapter_type) {
  393. case ACB_ADAPTER_TYPE_A: {
  394. arcmsr_flush_hba_cache(acb);
  395. }
  396. break;
  397. case ACB_ADAPTER_TYPE_B: {
  398. arcmsr_flush_hbb_cache(acb);
  399. }
  400. break;
  401. case ACB_ADAPTER_TYPE_C: {
  402. arcmsr_flush_hbc_cache(acb);
  403. }
  404. }
  405. }
/*
 * Allocate the pool of command control blocks (CCBs) in a single
 * DMA-coherent region and link them all onto acb->ccb_free_list.
 * Also sizes the per-host transfer limits (max_sectors, sg_tablesize)
 * from the firmware configuration, and resets the device-state map.
 * Returns 0 on success or -ENOMEM if the coherent allocation fails.
 */
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	dma_addr_t cdb_phyaddr;
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version;
	/* until the firmware reports otherwise, every target/LUN is absent */
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;
	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	/* config version >= 3: firmware encodes a larger transfer size
	 * in bits 8-15 (power-of-two multiplier of the SG page length) */
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);	/* one SG entry per 4 KB page */
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	/* each CCB carries (max_sg_entrys - 1) extra SG entries beyond the
	 * one embedded in the struct; round to 32 bytes for alignment */
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	ccb_tmp = dma_coherent;
	/* constant offset for translating bus addresses back to virtual */
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	/* carve the region into CCBs; dma_coherent_handle walks in step
	 * with ccb_tmp so each CCB knows its own bus address */
	for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		/* type C takes the full address; A/B firmware expects it
		 * pre-shifted right by 5 (32-byte units) */
		ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5));
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		INIT_LIST_HEAD(&ccb_tmp->list);
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
	}
	return 0;
}
  454. static void arcmsr_message_isr_bh_fn(struct work_struct *work)
  455. {
  456. struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh);
  457. switch (acb->adapter_type) {
  458. case ACB_ADAPTER_TYPE_A: {
  459. struct MessageUnit_A __iomem *reg = acb->pmuA;
  460. char *acb_dev_map = (char *)acb->device_map;
  461. uint32_t __iomem *signature = (uint32_t __iomem*) (&reg->message_rwbuffer[0]);
  462. char __iomem *devicemap = (char __iomem*) (&reg->message_rwbuffer[21]);
  463. int target, lun;
  464. struct scsi_device *psdev;
  465. char diff;
  466. atomic_inc(&acb->rq_map_token);
  467. if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
  468. for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
  469. diff = (*acb_dev_map)^readb(devicemap);
  470. if (diff != 0) {
  471. char temp;
  472. *acb_dev_map = readb(devicemap);
  473. temp =*acb_dev_map;
  474. for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
  475. if((temp & 0x01)==1 && (diff & 0x01) == 1) {
  476. scsi_add_device(acb->host, 0, target, lun);
  477. }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
  478. psdev = scsi_device_lookup(acb->host, 0, target, lun);
  479. if (psdev != NULL ) {
  480. scsi_remove_device(psdev);
  481. scsi_device_put(psdev);
  482. }
  483. }
  484. temp >>= 1;
  485. diff >>= 1;
  486. }
  487. }
  488. devicemap++;
  489. acb_dev_map++;
  490. }
  491. }
  492. break;
  493. }
  494. case ACB_ADAPTER_TYPE_B: {
  495. struct MessageUnit_B *reg = acb->pmuB;
  496. char *acb_dev_map = (char *)acb->device_map;
  497. uint32_t __iomem *signature = (uint32_t __iomem*)(&reg->message_rwbuffer[0]);
  498. char __iomem *devicemap = (char __iomem*)(&reg->message_rwbuffer[21]);
  499. int target, lun;
  500. struct scsi_device *psdev;
  501. char diff;
  502. atomic_inc(&acb->rq_map_token);
  503. if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
  504. for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
  505. diff = (*acb_dev_map)^readb(devicemap);
  506. if (diff != 0) {
  507. char temp;
  508. *acb_dev_map = readb(devicemap);
  509. temp =*acb_dev_map;
  510. for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
  511. if((temp & 0x01)==1 && (diff & 0x01) == 1) {
  512. scsi_add_device(acb->host, 0, target, lun);
  513. }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
  514. psdev = scsi_device_lookup(acb->host, 0, target, lun);
  515. if (psdev != NULL ) {
  516. scsi_remove_device(psdev);
  517. scsi_device_put(psdev);
  518. }
  519. }
  520. temp >>= 1;
  521. diff >>= 1;
  522. }
  523. }
  524. devicemap++;
  525. acb_dev_map++;
  526. }
  527. }
  528. }
  529. break;
  530. case ACB_ADAPTER_TYPE_C: {
  531. struct MessageUnit_C *reg = acb->pmuC;
  532. char *acb_dev_map = (char *)acb->device_map;
  533. uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
  534. char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
  535. int target, lun;
  536. struct scsi_device *psdev;
  537. char diff;
  538. atomic_inc(&acb->rq_map_token);
  539. if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
  540. for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
  541. diff = (*acb_dev_map)^readb(devicemap);
  542. if (diff != 0) {
  543. char temp;
  544. *acb_dev_map = readb(devicemap);
  545. temp = *acb_dev_map;
  546. for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
  547. if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
  548. scsi_add_device(acb->host, 0, target, lun);
  549. } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
  550. psdev = scsi_device_lookup(acb->host, 0, target, lun);
  551. if (psdev != NULL) {
  552. scsi_remove_device(psdev);
  553. scsi_device_put(psdev);
  554. }
  555. }
  556. temp >>= 1;
  557. diff >>= 1;
  558. }
  559. }
  560. devicemap++;
  561. acb_dev_map++;
  562. }
  563. }
  564. }
  565. }
  566. }
  567. static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  568. {
  569. struct Scsi_Host *host;
  570. struct AdapterControlBlock *acb;
  571. uint8_t bus,dev_fun;
  572. int error;
  573. error = pci_enable_device(pdev);
  574. if(error){
  575. return -ENODEV;
  576. }
  577. host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
  578. if(!host){
  579. goto pci_disable_dev;
  580. }
  581. error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  582. if(error){
  583. error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  584. if(error){
  585. printk(KERN_WARNING
  586. "scsi%d: No suitable DMA mask available\n",
  587. host->host_no);
  588. goto scsi_host_release;
  589. }
  590. }
  591. init_waitqueue_head(&wait_q);
  592. bus = pdev->bus->number;
  593. dev_fun = pdev->devfn;
  594. acb = (struct AdapterControlBlock *) host->hostdata;
  595. memset(acb,0,sizeof(struct AdapterControlBlock));
  596. acb->pdev = pdev;
  597. acb->host = host;
  598. host->max_lun = ARCMSR_MAX_TARGETLUN;
  599. host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
  600. host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/
  601. host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
  602. host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
  603. host->this_id = ARCMSR_SCSI_INITIATOR_ID;
  604. host->unique_id = (bus << 8) | dev_fun;
  605. pci_set_drvdata(pdev, host);
  606. pci_set_master(pdev);
  607. error = pci_request_regions(pdev, "arcmsr");
  608. if(error){
  609. goto scsi_host_release;
  610. }
  611. spin_lock_init(&acb->eh_lock);
  612. spin_lock_init(&acb->ccblist_lock);
  613. acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
  614. ACB_F_MESSAGE_RQBUFFER_CLEARED |
  615. ACB_F_MESSAGE_WQBUFFER_READED);
  616. acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
  617. INIT_LIST_HEAD(&acb->ccb_free_list);
  618. arcmsr_define_adapter_type(acb);
  619. error = arcmsr_remap_pciregion(acb);
  620. if(!error){
  621. goto pci_release_regs;
  622. }
  623. error = arcmsr_get_firmware_spec(acb);
  624. if(!error){
  625. goto unmap_pci_region;
  626. }
  627. error = arcmsr_alloc_ccb_pool(acb);
  628. if(error){
  629. goto free_hbb_mu;
  630. }
  631. arcmsr_iop_init(acb);
  632. error = scsi_add_host(host, &pdev->dev);
  633. if(error){
  634. goto RAID_controller_stop;
  635. }
  636. error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
  637. if(error){
  638. goto scsi_host_remove;
  639. }
  640. host->irq = pdev->irq;
  641. scsi_scan_host(host);
  642. INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
  643. atomic_set(&acb->rq_map_token, 16);
  644. atomic_set(&acb->ante_token_value, 16);
  645. acb->fw_flag = FW_NORMAL;
  646. init_timer(&acb->eternal_timer);
  647. acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
  648. acb->eternal_timer.data = (unsigned long) acb;
  649. acb->eternal_timer.function = &arcmsr_request_device_map;
  650. add_timer(&acb->eternal_timer);
  651. if(arcmsr_alloc_sysfs_attr(acb))
  652. goto out_free_sysfs;
  653. return 0;
  654. out_free_sysfs:
  655. scsi_host_remove:
  656. scsi_remove_host(host);
  657. RAID_controller_stop:
  658. arcmsr_stop_adapter_bgrb(acb);
  659. arcmsr_flush_adapter_cache(acb);
  660. arcmsr_free_ccb_pool(acb);
  661. free_hbb_mu:
  662. arcmsr_free_hbb_mu(acb);
  663. unmap_pci_region:
  664. arcmsr_unmap_pciregion(acb);
  665. pci_release_regs:
  666. pci_release_regions(pdev);
  667. scsi_host_release:
  668. scsi_host_put(host);
  669. pci_disable_dev:
  670. pci_disable_device(pdev);
  671. return -ENODEV;
  672. }
  673. static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
  674. {
  675. struct MessageUnit_A __iomem *reg = acb->pmuA;
  676. writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
  677. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  678. printk(KERN_NOTICE
  679. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  680. , acb->host->host_no);
  681. return false;
  682. }
  683. return true;
  684. }
  685. static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
  686. {
  687. struct MessageUnit_B *reg = acb->pmuB;
  688. writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
  689. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  690. printk(KERN_NOTICE
  691. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  692. , acb->host->host_no);
  693. return false;
  694. }
  695. return true;
  696. }
  697. static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
  698. {
  699. struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
  700. writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
  701. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  702. if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
  703. printk(KERN_NOTICE
  704. "arcmsr%d: wait 'abort all outstanding command' timeout \n"
  705. , pACB->host->host_no);
  706. return false;
  707. }
  708. return true;
  709. }
  710. static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
  711. {
  712. uint8_t rtnval = 0;
  713. switch (acb->adapter_type) {
  714. case ACB_ADAPTER_TYPE_A: {
  715. rtnval = arcmsr_abort_hba_allcmd(acb);
  716. }
  717. break;
  718. case ACB_ADAPTER_TYPE_B: {
  719. rtnval = arcmsr_abort_hbb_allcmd(acb);
  720. }
  721. break;
  722. case ACB_ADAPTER_TYPE_C: {
  723. rtnval = arcmsr_abort_hbc_allcmd(acb);
  724. }
  725. }
  726. return rtnval;
  727. }
  728. static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
  729. {
  730. struct MessageUnit_B *reg = pacb->pmuB;
  731. writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
  732. if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
  733. printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
  734. return false;
  735. }
  736. return true;
  737. }
  738. static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
  739. {
  740. struct scsi_cmnd *pcmd = ccb->pcmd;
  741. scsi_dma_unmap(pcmd);
  742. }
/*
 * Complete a CCB back to the SCSI midlayer: drop the outstanding count,
 * tear down its DMA mapping, mark it DONE, return it to the free list
 * and invoke the midlayer's scsi_done callback.  The free-list insertion
 * is protected by ccblist_lock since it races with command submission.
 */
static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;
	atomic_dec(&acb->ccboutstandingcount);
	arcmsr_pci_unmap_dma(ccb);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	pcmd->scsi_done(pcmd);
}
  756. static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
  757. {
  758. struct scsi_cmnd *pcmd = ccb->pcmd;
  759. struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
  760. pcmd->result = DID_OK << 16;
  761. if (sensebuffer) {
  762. int sense_data_length =
  763. sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
  764. ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
  765. memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
  766. memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
  767. sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
  768. sensebuffer->Valid = 1;
  769. }
  770. }
/*
 * Mask every outbound interrupt source on the adapter and return the
 * previous mask value so arcmsr_enable_outbound_ints() can restore it.
 * Type A/C mask registers disable with set bits; the type-B doorbell
 * mask appears to enable with set bits, so 0 masks everything there.
 */
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
			&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		/* disable all outbound interrupt */
		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
		writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
		}
		break;
	}
	return orig_mask;
}
/*
 * Translate the adapter's completion status for @ccb into a SCSI
 * midlayer result code and complete the command.  Per-device state in
 * acb->devstate is downgraded to ARECA_RAID_GONE on fatal statuses so
 * subsequent commands can re-probe, and restored to GOOD on success.
 */
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		/* a successful completion resurrects a device marked gone */
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;
		case ARCMSR_DEV_ABORTED:
		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;
		case ARCMSR_DEV_CHECK_CONDITION: {
			/* device is fine; hand sense data back to the midlayer */
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;
		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}
/*
 * Route one completed CCB coming off a done queue.  Sanity-checks that
 * the CCB belongs to this adapter and is in the STARTED state: a CCB
 * already marked ABORTED is completed with DID_ABORT, any other
 * inconsistency is only logged and dropped.  Valid CCBs are handed to
 * arcmsr_report_ccb_state() for status translation.
 */
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	int id, lun;
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				id = abortcmd->device->id;
				lun = abortcmd->device->lun;
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
				acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
done acb = '0x%p'"
		"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
		" ccboutstandingcount = %d \n"
		, acb->host->host_no
		, acb
		, pCCB
		, pCCB->acb
		, pCCB->startdone
		, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}
/*
 * After an abort, drain and complete everything still sitting in the
 * adapter's outbound (done) queue: ack pending interrupts, pop each
 * completed CDB's bus-address token, convert it back to its virtual CCB
 * via vir2phy_offset and push it through arcmsr_drain_donequeue().
 * Iteration is capped at ARCMSR_MAX_OUTSTANDING_CMD as a safety limit.
 */
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		/* 0xFFFFFFFF from the queue port means "queue empty" */
		while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
			&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			/* a non-zero slot in done_qbuffer is a completed CCB */
			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
				writel(0, &reg->done_qbuffer[i]);
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		struct ARCMSR_CDB *pARCMSR_CDB;
		uint32_t flag_ccb, ccb_cdb_phy;
		bool error;
		struct CommandControlBlock *pCCB;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/*need to do*/
			flag_ccb = readl(&reg->outbound_queueport_low);
			/* low 4 bits carry flags; the rest is the CDB bus address */
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
	}
}
/*
 * arcmsr_remove - PCI remove callback: tear one adapter down completely.
 * Ordering matters: detach from the SCSI midlayer first, quiesce the
 * deferred work and device-map timer, silence and stop the controller,
 * then give outstanding commands up to ARCMSR_MAX_OUTSTANDING_CMD polls
 * of 25ms each to drain before force-aborting whatever remains and
 * releasing the IRQ, CCB pool, MMIO mappings and PCI resources.
 */
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;
	/* poll the interrupt handler by hand until all CCBs complete */
	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}
	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;
		/* drain timed out: abort on the firmware side and fail each
		 * still-started CCB back to the midlayer with DID_ABORT */
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	free_irq(pdev->irq, acb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_hbb_mu(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
/*
 * arcmsr_shutdown - PCI shutdown hook (halt/reboot/kexec).
 * Quiesce the adapter without full teardown: stop the device-map timer,
 * mask outbound interrupts, drain the deferred message work, stop the
 * background rebuild and flush the controller's cache to disk.
 */
static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}
  989. static int arcmsr_module_init(void)
  990. {
  991. int error = 0;
  992. error = pci_register_driver(&arcmsr_pci_driver);
  993. return error;
  994. }
/* Driver exit: unregister from the PCI core, detaching all adapters. */
static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
/*
 * Re-enable the outbound interrupt sources the driver services, starting
 * from the mask previously saved by arcmsr_disable_outbound_ints().
 * Type A/C mask registers disable with set bits (so bits are cleared to
 * enable); the type-B doorbell mask appears to enable with set bits.
 * The effective enable set is cached in acb->outbound_int_enable.
 */
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* unmask postqueue, doorbell and message0 interrupts */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
	}
}
/*
 * Translate a midlayer scsi_cmnd into this driver's ARCMSR_CDB inside
 * @ccb: copy the SCSI CDB, DMA-map the scatter/gather list and encode
 * each segment as a 32-bit or 64-bit SG entry depending on whether the
 * segment's bus address fits in 32 bits.  Returns SUCCESS, or FAILED
 * when the mapping fails or exceeds the host's sg_tablesize.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed CDB header size before the SG list */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;
	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->Context = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* segment below 4GB: compact 32-bit SG entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			/* IS_SG64_ADDR in the length word marks a 64-bit entry */
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* number of 256-byte pages the whole CDB occupies, rounded up */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
/*
 * Hand a built CCB to the adapter's inbound queue.  The CCB is marked
 * started and ccboutstandingcount bumped before touching hardware.
 * Per family: type A writes the address pattern (plus a flag for large
 * CDBs) to a FIFO port; type B fills a slot in a small circular post
 * queue and rings the doorbell; type C packs the CDB frame count into
 * the low bits of the address and may first post the upper 32 bits.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
			&reg->inbound_queueport);
		else {
			writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;
		/* zero the slot after ours so the queue end is visible */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
			&reg->post_qbuffer[index]);
		} else {
			writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;
		/* clamp the advertised size; low bits encode the frame count */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
		if (acb->cdb_phyaddr_hi32) {
			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		} else {
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		}
	}
}
  1132. static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
  1133. {
  1134. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1135. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1136. writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
  1137. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  1138. printk(KERN_NOTICE
  1139. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  1140. , acb->host->host_no);
  1141. }
  1142. }
  1143. static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
  1144. {
  1145. struct MessageUnit_B *reg = acb->pmuB;
  1146. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1147. writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
  1148. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  1149. printk(KERN_NOTICE
  1150. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  1151. , acb->host->host_no);
  1152. }
  1153. }
  1154. static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
  1155. {
  1156. struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
  1157. pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1158. writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
  1159. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  1160. if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
  1161. printk(KERN_NOTICE
  1162. "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
  1163. , pACB->host->host_no);
  1164. }
  1165. return;
  1166. }
  1167. static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
  1168. {
  1169. switch (acb->adapter_type) {
  1170. case ACB_ADAPTER_TYPE_A: {
  1171. arcmsr_stop_hba_bgrb(acb);
  1172. }
  1173. break;
  1174. case ACB_ADAPTER_TYPE_B: {
  1175. arcmsr_stop_hbb_bgrb(acb);
  1176. }
  1177. break;
  1178. case ACB_ADAPTER_TYPE_C: {
  1179. arcmsr_stop_hbc_bgrb(acb);
  1180. }
  1181. }
  1182. }
  1183. static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
  1184. {
  1185. dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
  1186. }
  1187. void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
  1188. {
  1189. switch (acb->adapter_type) {
  1190. case ACB_ADAPTER_TYPE_A: {
  1191. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1192. writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
  1193. }
  1194. break;
  1195. case ACB_ADAPTER_TYPE_B: {
  1196. struct MessageUnit_B *reg = acb->pmuB;
  1197. writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
  1198. }
  1199. break;
  1200. case ACB_ADAPTER_TYPE_C: {
  1201. struct MessageUnit_C __iomem *reg = acb->pmuC;
  1202. writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
  1203. }
  1204. }
  1205. }
  1206. static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
  1207. {
  1208. switch (acb->adapter_type) {
  1209. case ACB_ADAPTER_TYPE_A: {
  1210. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1211. /*
  1212. ** push inbound doorbell tell iop, driver data write ok
  1213. ** and wait reply on next hwinterrupt for next Qbuffer post
  1214. */
  1215. writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
  1216. }
  1217. break;
  1218. case ACB_ADAPTER_TYPE_B: {
  1219. struct MessageUnit_B *reg = acb->pmuB;
  1220. /*
  1221. ** push inbound doorbell tell iop, driver data write ok
  1222. ** and wait reply on next hwinterrupt for next Qbuffer post
  1223. */
  1224. writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
  1225. }
  1226. break;
  1227. case ACB_ADAPTER_TYPE_C: {
  1228. struct MessageUnit_C __iomem *reg = acb->pmuC;
  1229. /*
  1230. ** push inbound doorbell tell iop, driver data write ok
  1231. ** and wait reply on next hwinterrupt for next Qbuffer post
  1232. */
  1233. writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
  1234. }
  1235. break;
  1236. }
  1237. }
  1238. struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
  1239. {
  1240. struct QBUFFER __iomem *qbuffer = NULL;
  1241. switch (acb->adapter_type) {
  1242. case ACB_ADAPTER_TYPE_A: {
  1243. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1244. qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
  1245. }
  1246. break;
  1247. case ACB_ADAPTER_TYPE_B: {
  1248. struct MessageUnit_B *reg = acb->pmuB;
  1249. qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
  1250. }
  1251. break;
  1252. case ACB_ADAPTER_TYPE_C: {
  1253. struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
  1254. qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
  1255. }
  1256. }
  1257. return qbuffer;
  1258. }
  1259. static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
  1260. {
  1261. struct QBUFFER __iomem *pqbuffer = NULL;
  1262. switch (acb->adapter_type) {
  1263. case ACB_ADAPTER_TYPE_A: {
  1264. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1265. pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
  1266. }
  1267. break;
  1268. case ACB_ADAPTER_TYPE_B: {
  1269. struct MessageUnit_B *reg = acb->pmuB;
  1270. pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
  1271. }
  1272. break;
  1273. case ACB_ADAPTER_TYPE_C: {
  1274. struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
  1275. pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
  1276. }
  1277. }
  1278. return pqbuffer;
  1279. }
/*
 * "IOP wrote data" doorbell handler: copy the IOP's queue buffer into
 * the driver's circular rqbuffer one byte at a time (the ring wraps at
 * ARCMSR_MAX_QBUFFER).  If the ring lacks room for the whole message,
 * set ACB_F_IOPDATA_OVERFLOW and do NOT ack, leaving the data on the
 * IOP side until a reader frees space.
 */
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *prbuffer;
	struct QBUFFER *pQbuffer;
	uint8_t __iomem *iop_data;
	int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
	rqbuf_lastindex = acb->rqbuf_lastindex;
	rqbuf_firstindex = acb->rqbuf_firstindex;
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	/* NOTE(review): data_len is read from MMIO without readl() here —
	 * pre-existing behavior, confirm against other arcmsr versions */
	iop_len = prbuffer->data_len;
	/* free space in the circular buffer (one slot kept unused) */
	my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
	if (my_empty_len >= iop_len)
	{
		while (iop_len > 0) {
			pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
			memcpy(pQbuffer, iop_data, 1);
			rqbuf_lastindex++;
			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		acb->rqbuf_lastindex = rqbuf_lastindex;
		/* ack so the IOP may post its next buffer */
		arcmsr_iop_message_read(acb);
	}
	else {
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	}
}
/*
 * "IOP read data" doorbell handler: the IOP has consumed the previous
 * write buffer, so stream more bytes (at most 124 per exchange) from
 * the driver's circular wqbuffer into the IOP's write queue buffer and
 * signal it.  WQBUFFER_CLEARED is set once the ring is fully drained.
 */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
		uint8_t *pQbuffer;
		struct QBUFFER __iomem *pwbuffer;
		uint8_t __iomem *iop_data;
		int32_t allxfer_len = 0;
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		/* copy byte-at-a-time because the ring may wrap mid-transfer */
		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
			(allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		pwbuffer->data_len = allxfer_len;
		arcmsr_iop_message_wrote(acb);
	}
	if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	}
}
  1336. static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
  1337. {
  1338. uint32_t outbound_doorbell;
  1339. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1340. outbound_doorbell = readl(&reg->outbound_doorbell);
  1341. writel(outbound_doorbell, &reg->outbound_doorbell);
  1342. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
  1343. arcmsr_iop2drv_data_wrote_handle(acb);
  1344. }
  1345. if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
  1346. arcmsr_iop2drv_data_read_handle(acb);
  1347. }
  1348. }
  1349. static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
  1350. {
  1351. uint32_t outbound_doorbell;
  1352. struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
  1353. /*
  1354. *******************************************************************
  1355. ** Maybe here we need to check wrqbuffer_lock is lock or not
  1356. ** DOORBELL: din! don!
  1357. ** check if there are any mail need to pack from firmware
  1358. *******************************************************************
  1359. */
  1360. outbound_doorbell = readl(&reg->outbound_doorbell);
  1361. writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
  1362. if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
  1363. arcmsr_iop2drv_data_wrote_handle(pACB);
  1364. }
  1365. if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
  1366. arcmsr_iop2drv_data_read_handle(pACB);
  1367. }
  1368. if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
  1369. arcmsr_hbc_message_isr(pACB); /* messenger of "driver to iop commands" */
  1370. }
  1371. return;
  1372. }
  1373. static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
  1374. {
  1375. uint32_t flag_ccb;
  1376. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1377. struct ARCMSR_CDB *pARCMSR_CDB;
  1378. struct CommandControlBlock *pCCB;
  1379. bool error;
  1380. while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
  1381. pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
  1382. pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
  1383. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
  1384. arcmsr_drain_donequeue(acb, pCCB, error);
  1385. }
  1386. }
  1387. static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
  1388. {
  1389. uint32_t index;
  1390. uint32_t flag_ccb;
  1391. struct MessageUnit_B *reg = acb->pmuB;
  1392. struct ARCMSR_CDB *pARCMSR_CDB;
  1393. struct CommandControlBlock *pCCB;
  1394. bool error;
  1395. index = reg->doneq_index;
  1396. while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
  1397. writel(0, &reg->done_qbuffer[index]);
  1398. pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
  1399. pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
  1400. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
  1401. arcmsr_drain_donequeue(acb, pCCB, error);
  1402. index++;
  1403. index %= ARCMSR_MAX_HBB_POSTQUEUE;
  1404. reg->doneq_index = index;
  1405. }
  1406. }
  1407. static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
  1408. {
  1409. struct MessageUnit_C *phbcmu;
  1410. struct ARCMSR_CDB *arcmsr_cdb;
  1411. struct CommandControlBlock *ccb;
  1412. uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
  1413. int error;
  1414. phbcmu = (struct MessageUnit_C *)acb->pmuC;
  1415. /* areca cdb command done */
  1416. /* Use correct offset and size for syncing */
  1417. while (readl(&phbcmu->host_int_status) &
  1418. ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
  1419. /* check if command done with no error*/
  1420. flag_ccb = readl(&phbcmu->outbound_queueport_low);
  1421. ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
  1422. arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
  1423. ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
  1424. error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
  1425. /* check if command done with no error */
  1426. arcmsr_drain_donequeue(acb, ccb, error);
  1427. if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
  1428. writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
  1429. break;
  1430. }
  1431. throttling++;
  1432. }
  1433. }
  1434. /*
  1435. **********************************************************************************
  1436. ** Handle a message interrupt
  1437. **
  1438. ** The only message interrupt we expect is in response to a query for the current adapter config.
  1439. ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
  1440. **********************************************************************************
  1441. */
  1442. static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
  1443. {
  1444. struct MessageUnit_A *reg = acb->pmuA;
  1445. /*clear interrupt and message state*/
  1446. writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
  1447. schedule_work(&acb->arcmsr_do_message_isr_bh);
  1448. }
  1449. static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
  1450. {
  1451. struct MessageUnit_B *reg = acb->pmuB;
  1452. /*clear interrupt and message state*/
  1453. writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
  1454. schedule_work(&acb->arcmsr_do_message_isr_bh);
  1455. }
  1456. /*
  1457. **********************************************************************************
  1458. ** Handle a message interrupt
  1459. **
  1460. ** The only message interrupt we expect is in response to a query for the
  1461. ** current adapter config.
  1462. ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
  1463. **********************************************************************************
  1464. */
  1465. static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
  1466. {
  1467. struct MessageUnit_C *reg = acb->pmuC;
  1468. /*clear interrupt and message state*/
  1469. writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
  1470. schedule_work(&acb->arcmsr_do_message_isr_bh);
  1471. }
  1472. static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
  1473. {
  1474. uint32_t outbound_intstatus;
  1475. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1476. outbound_intstatus = readl(&reg->outbound_intstatus) &
  1477. acb->outbound_int_enable;
  1478. if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
  1479. return 1;
  1480. }
  1481. writel(outbound_intstatus, &reg->outbound_intstatus);
  1482. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
  1483. arcmsr_hba_doorbell_isr(acb);
  1484. }
  1485. if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
  1486. arcmsr_hba_postqueue_isr(acb);
  1487. }
  1488. if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
  1489. /* messenger of "driver to iop commands" */
  1490. arcmsr_hba_message_isr(acb);
  1491. }
  1492. return 0;
  1493. }
  1494. static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
  1495. {
  1496. uint32_t outbound_doorbell;
  1497. struct MessageUnit_B *reg = acb->pmuB;
  1498. outbound_doorbell = readl(reg->iop2drv_doorbell) &
  1499. acb->outbound_int_enable;
  1500. if (!outbound_doorbell)
  1501. return 1;
  1502. writel(~outbound_doorbell, reg->iop2drv_doorbell);
  1503. /*in case the last action of doorbell interrupt clearance is cached,
  1504. this action can push HW to write down the clear bit*/
  1505. readl(reg->iop2drv_doorbell);
  1506. writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
  1507. if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
  1508. arcmsr_iop2drv_data_wrote_handle(acb);
  1509. }
  1510. if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
  1511. arcmsr_iop2drv_data_read_handle(acb);
  1512. }
  1513. if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
  1514. arcmsr_hbb_postqueue_isr(acb);
  1515. }
  1516. if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
  1517. /* messenger of "driver to iop commands" */
  1518. arcmsr_hbb_message_isr(acb);
  1519. }
  1520. return 0;
  1521. }
  1522. static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
  1523. {
  1524. uint32_t host_interrupt_status;
  1525. struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
  1526. /*
  1527. *********************************************
  1528. ** check outbound intstatus
  1529. *********************************************
  1530. */
  1531. host_interrupt_status = readl(&phbcmu->host_int_status);
  1532. if (!host_interrupt_status) {
  1533. /*it must be share irq*/
  1534. return 1;
  1535. }
  1536. /* MU ioctl transfer doorbell interrupts*/
  1537. if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
  1538. arcmsr_hbc_doorbell_isr(pACB); /* messenger of "ioctl message read write" */
  1539. }
  1540. /* MU post queue interrupts*/
  1541. if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
  1542. arcmsr_hbc_postqueue_isr(pACB); /* messenger of "scsi commands" */
  1543. }
  1544. return 0;
  1545. }
  1546. static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
  1547. {
  1548. switch (acb->adapter_type) {
  1549. case ACB_ADAPTER_TYPE_A: {
  1550. if (arcmsr_handle_hba_isr(acb)) {
  1551. return IRQ_NONE;
  1552. }
  1553. }
  1554. break;
  1555. case ACB_ADAPTER_TYPE_B: {
  1556. if (arcmsr_handle_hbb_isr(acb)) {
  1557. return IRQ_NONE;
  1558. }
  1559. }
  1560. break;
  1561. case ACB_ADAPTER_TYPE_C: {
  1562. if (arcmsr_handle_hbc_isr(acb)) {
  1563. return IRQ_NONE;
  1564. }
  1565. }
  1566. }
  1567. return IRQ_HANDLED;
  1568. }
  1569. static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
  1570. {
  1571. if (acb) {
  1572. /* stop adapter background rebuild */
  1573. if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
  1574. uint32_t intmask_org;
  1575. acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
  1576. intmask_org = arcmsr_disable_outbound_ints(acb);
  1577. arcmsr_stop_adapter_bgrb(acb);
  1578. arcmsr_flush_adapter_cache(acb);
  1579. arcmsr_enable_outbound_ints(acb, intmask_org);
  1580. }
  1581. }
  1582. }
  1583. void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
  1584. {
  1585. int32_t wqbuf_firstindex, wqbuf_lastindex;
  1586. uint8_t *pQbuffer;
  1587. struct QBUFFER __iomem *pwbuffer;
  1588. uint8_t __iomem *iop_data;
  1589. int32_t allxfer_len = 0;
  1590. pwbuffer = arcmsr_get_iop_wqbuffer(acb);
  1591. iop_data = (uint8_t __iomem *)pwbuffer->data;
  1592. if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
  1593. acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
  1594. wqbuf_firstindex = acb->wqbuf_firstindex;
  1595. wqbuf_lastindex = acb->wqbuf_lastindex;
  1596. while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
  1597. pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
  1598. memcpy(iop_data, pQbuffer, 1);
  1599. wqbuf_firstindex++;
  1600. wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1601. iop_data++;
  1602. allxfer_len++;
  1603. }
  1604. acb->wqbuf_firstindex = wqbuf_firstindex;
  1605. pwbuffer->data_len = allxfer_len;
  1606. arcmsr_iop_message_wrote(acb);
  1607. }
  1608. }
  1609. static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
  1610. struct scsi_cmnd *cmd)
  1611. {
  1612. struct CMD_MESSAGE_FIELD *pcmdmessagefld;
  1613. int retvalue = 0, transfer_len = 0;
  1614. char *buffer;
  1615. struct scatterlist *sg;
  1616. uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
  1617. (uint32_t ) cmd->cmnd[6] << 16 |
  1618. (uint32_t ) cmd->cmnd[7] << 8 |
  1619. (uint32_t ) cmd->cmnd[8];
  1620. /* 4 bytes: Areca io control code */
  1621. sg = scsi_sglist(cmd);
  1622. buffer = kmap_atomic(sg_page(sg)) + sg->offset;
  1623. if (scsi_sg_count(cmd) > 1) {
  1624. retvalue = ARCMSR_MESSAGE_FAIL;
  1625. goto message_out;
  1626. }
  1627. transfer_len += sg->length;
  1628. if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
  1629. retvalue = ARCMSR_MESSAGE_FAIL;
  1630. goto message_out;
  1631. }
  1632. pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
  1633. switch(controlcode) {
  1634. case ARCMSR_MESSAGE_READ_RQBUFFER: {
  1635. unsigned char *ver_addr;
  1636. uint8_t *pQbuffer, *ptmpQbuffer;
  1637. int32_t allxfer_len = 0;
  1638. ver_addr = kmalloc(1032, GFP_ATOMIC);
  1639. if (!ver_addr) {
  1640. retvalue = ARCMSR_MESSAGE_FAIL;
  1641. goto message_out;
  1642. }
  1643. ptmpQbuffer = ver_addr;
  1644. while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
  1645. && (allxfer_len < 1031)) {
  1646. pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
  1647. memcpy(ptmpQbuffer, pQbuffer, 1);
  1648. acb->rqbuf_firstindex++;
  1649. acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
  1650. ptmpQbuffer++;
  1651. allxfer_len++;
  1652. }
  1653. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1654. struct QBUFFER __iomem *prbuffer;
  1655. uint8_t __iomem *iop_data;
  1656. int32_t iop_len;
  1657. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1658. prbuffer = arcmsr_get_iop_rqbuffer(acb);
  1659. iop_data = prbuffer->data;
  1660. iop_len = readl(&prbuffer->data_len);
  1661. while (iop_len > 0) {
  1662. acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
  1663. acb->rqbuf_lastindex++;
  1664. acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  1665. iop_data++;
  1666. iop_len--;
  1667. }
  1668. arcmsr_iop_message_read(acb);
  1669. }
  1670. memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
  1671. pcmdmessagefld->cmdmessage.Length = allxfer_len;
  1672. if(acb->fw_flag == FW_DEADLOCK) {
  1673. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1674. }else{
  1675. pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
  1676. }
  1677. kfree(ver_addr);
  1678. }
  1679. break;
  1680. case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
  1681. unsigned char *ver_addr;
  1682. int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
  1683. uint8_t *pQbuffer, *ptmpuserbuffer;
  1684. ver_addr = kmalloc(1032, GFP_ATOMIC);
  1685. if (!ver_addr) {
  1686. retvalue = ARCMSR_MESSAGE_FAIL;
  1687. goto message_out;
  1688. }
  1689. if(acb->fw_flag == FW_DEADLOCK) {
  1690. pcmdmessagefld->cmdmessage.ReturnCode =
  1691. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1692. }else{
  1693. pcmdmessagefld->cmdmessage.ReturnCode =
  1694. ARCMSR_MESSAGE_RETURNCODE_OK;
  1695. }
  1696. ptmpuserbuffer = ver_addr;
  1697. user_len = pcmdmessagefld->cmdmessage.Length;
  1698. memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
  1699. wqbuf_lastindex = acb->wqbuf_lastindex;
  1700. wqbuf_firstindex = acb->wqbuf_firstindex;
  1701. if (wqbuf_lastindex != wqbuf_firstindex) {
  1702. struct SENSE_DATA *sensebuffer =
  1703. (struct SENSE_DATA *)cmd->sense_buffer;
  1704. arcmsr_post_ioctldata2iop(acb);
  1705. /* has error report sensedata */
  1706. sensebuffer->ErrorCode = 0x70;
  1707. sensebuffer->SenseKey = ILLEGAL_REQUEST;
  1708. sensebuffer->AdditionalSenseLength = 0x0A;
  1709. sensebuffer->AdditionalSenseCode = 0x20;
  1710. sensebuffer->Valid = 1;
  1711. retvalue = ARCMSR_MESSAGE_FAIL;
  1712. } else {
  1713. my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
  1714. &(ARCMSR_MAX_QBUFFER - 1);
  1715. if (my_empty_len >= user_len) {
  1716. while (user_len > 0) {
  1717. pQbuffer =
  1718. &acb->wqbuffer[acb->wqbuf_lastindex];
  1719. memcpy(pQbuffer, ptmpuserbuffer, 1);
  1720. acb->wqbuf_lastindex++;
  1721. acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
  1722. ptmpuserbuffer++;
  1723. user_len--;
  1724. }
  1725. if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
  1726. acb->acb_flags &=
  1727. ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
  1728. arcmsr_post_ioctldata2iop(acb);
  1729. }
  1730. } else {
  1731. /* has error report sensedata */
  1732. struct SENSE_DATA *sensebuffer =
  1733. (struct SENSE_DATA *)cmd->sense_buffer;
  1734. sensebuffer->ErrorCode = 0x70;
  1735. sensebuffer->SenseKey = ILLEGAL_REQUEST;
  1736. sensebuffer->AdditionalSenseLength = 0x0A;
  1737. sensebuffer->AdditionalSenseCode = 0x20;
  1738. sensebuffer->Valid = 1;
  1739. retvalue = ARCMSR_MESSAGE_FAIL;
  1740. }
  1741. }
  1742. kfree(ver_addr);
  1743. }
  1744. break;
  1745. case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
  1746. uint8_t *pQbuffer = acb->rqbuffer;
  1747. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1748. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1749. arcmsr_iop_message_read(acb);
  1750. }
  1751. acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
  1752. acb->rqbuf_firstindex = 0;
  1753. acb->rqbuf_lastindex = 0;
  1754. memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
  1755. if(acb->fw_flag == FW_DEADLOCK) {
  1756. pcmdmessagefld->cmdmessage.ReturnCode =
  1757. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1758. }else{
  1759. pcmdmessagefld->cmdmessage.ReturnCode =
  1760. ARCMSR_MESSAGE_RETURNCODE_OK;
  1761. }
  1762. }
  1763. break;
  1764. case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
  1765. uint8_t *pQbuffer = acb->wqbuffer;
  1766. if(acb->fw_flag == FW_DEADLOCK) {
  1767. pcmdmessagefld->cmdmessage.ReturnCode =
  1768. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1769. }else{
  1770. pcmdmessagefld->cmdmessage.ReturnCode =
  1771. ARCMSR_MESSAGE_RETURNCODE_OK;
  1772. }
  1773. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1774. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1775. arcmsr_iop_message_read(acb);
  1776. }
  1777. acb->acb_flags |=
  1778. (ACB_F_MESSAGE_WQBUFFER_CLEARED |
  1779. ACB_F_MESSAGE_WQBUFFER_READED);
  1780. acb->wqbuf_firstindex = 0;
  1781. acb->wqbuf_lastindex = 0;
  1782. memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
  1783. }
  1784. break;
  1785. case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
  1786. uint8_t *pQbuffer;
  1787. if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
  1788. acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
  1789. arcmsr_iop_message_read(acb);
  1790. }
  1791. acb->acb_flags |=
  1792. (ACB_F_MESSAGE_WQBUFFER_CLEARED
  1793. | ACB_F_MESSAGE_RQBUFFER_CLEARED
  1794. | ACB_F_MESSAGE_WQBUFFER_READED);
  1795. acb->rqbuf_firstindex = 0;
  1796. acb->rqbuf_lastindex = 0;
  1797. acb->wqbuf_firstindex = 0;
  1798. acb->wqbuf_lastindex = 0;
  1799. pQbuffer = acb->rqbuffer;
  1800. memset(pQbuffer, 0, sizeof(struct QBUFFER));
  1801. pQbuffer = acb->wqbuffer;
  1802. memset(pQbuffer, 0, sizeof(struct QBUFFER));
  1803. if(acb->fw_flag == FW_DEADLOCK) {
  1804. pcmdmessagefld->cmdmessage.ReturnCode =
  1805. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1806. }else{
  1807. pcmdmessagefld->cmdmessage.ReturnCode =
  1808. ARCMSR_MESSAGE_RETURNCODE_OK;
  1809. }
  1810. }
  1811. break;
  1812. case ARCMSR_MESSAGE_RETURN_CODE_3F: {
  1813. if(acb->fw_flag == FW_DEADLOCK) {
  1814. pcmdmessagefld->cmdmessage.ReturnCode =
  1815. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1816. }else{
  1817. pcmdmessagefld->cmdmessage.ReturnCode =
  1818. ARCMSR_MESSAGE_RETURNCODE_3F;
  1819. }
  1820. break;
  1821. }
  1822. case ARCMSR_MESSAGE_SAY_HELLO: {
  1823. int8_t *hello_string = "Hello! I am ARCMSR";
  1824. if(acb->fw_flag == FW_DEADLOCK) {
  1825. pcmdmessagefld->cmdmessage.ReturnCode =
  1826. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1827. }else{
  1828. pcmdmessagefld->cmdmessage.ReturnCode =
  1829. ARCMSR_MESSAGE_RETURNCODE_OK;
  1830. }
  1831. memcpy(pcmdmessagefld->messagedatabuffer, hello_string
  1832. , (int16_t)strlen(hello_string));
  1833. }
  1834. break;
  1835. case ARCMSR_MESSAGE_SAY_GOODBYE:
  1836. if(acb->fw_flag == FW_DEADLOCK) {
  1837. pcmdmessagefld->cmdmessage.ReturnCode =
  1838. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1839. }
  1840. arcmsr_iop_parking(acb);
  1841. break;
  1842. case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
  1843. if(acb->fw_flag == FW_DEADLOCK) {
  1844. pcmdmessagefld->cmdmessage.ReturnCode =
  1845. ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
  1846. }
  1847. arcmsr_flush_adapter_cache(acb);
  1848. break;
  1849. default:
  1850. retvalue = ARCMSR_MESSAGE_FAIL;
  1851. }
  1852. message_out:
  1853. sg = scsi_sglist(cmd);
  1854. kunmap_atomic(buffer - sg->offset);
  1855. return retvalue;
  1856. }
  1857. static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
  1858. {
  1859. struct list_head *head = &acb->ccb_free_list;
  1860. struct CommandControlBlock *ccb = NULL;
  1861. unsigned long flags;
  1862. spin_lock_irqsave(&acb->ccblist_lock, flags);
  1863. if (!list_empty(head)) {
  1864. ccb = list_entry(head->next, struct CommandControlBlock, list);
  1865. list_del_init(&ccb->list);
  1866. }else{
  1867. spin_unlock_irqrestore(&acb->ccblist_lock, flags);
  1868. return 0;
  1869. }
  1870. spin_unlock_irqrestore(&acb->ccblist_lock, flags);
  1871. return ccb;
  1872. }
  1873. static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
  1874. struct scsi_cmnd *cmd)
  1875. {
  1876. switch (cmd->cmnd[0]) {
  1877. case INQUIRY: {
  1878. unsigned char inqdata[36];
  1879. char *buffer;
  1880. struct scatterlist *sg;
  1881. if (cmd->device->lun) {
  1882. cmd->result = (DID_TIME_OUT << 16);
  1883. cmd->scsi_done(cmd);
  1884. return;
  1885. }
  1886. inqdata[0] = TYPE_PROCESSOR;
  1887. /* Periph Qualifier & Periph Dev Type */
  1888. inqdata[1] = 0;
  1889. /* rem media bit & Dev Type Modifier */
  1890. inqdata[2] = 0;
  1891. /* ISO, ECMA, & ANSI versions */
  1892. inqdata[4] = 31;
  1893. /* length of additional data */
  1894. strncpy(&inqdata[8], "Areca ", 8);
  1895. /* Vendor Identification */
  1896. strncpy(&inqdata[16], "RAID controller ", 16);
  1897. /* Product Identification */
  1898. strncpy(&inqdata[32], "R001", 4); /* Product Revision */
  1899. sg = scsi_sglist(cmd);
  1900. buffer = kmap_atomic(sg_page(sg)) + sg->offset;
  1901. memcpy(buffer, inqdata, sizeof(inqdata));
  1902. sg = scsi_sglist(cmd);
  1903. kunmap_atomic(buffer - sg->offset);
  1904. cmd->scsi_done(cmd);
  1905. }
  1906. break;
  1907. case WRITE_BUFFER:
  1908. case READ_BUFFER: {
  1909. if (arcmsr_iop_message_xfer(acb, cmd))
  1910. cmd->result = (DID_ERROR << 16);
  1911. cmd->scsi_done(cmd);
  1912. }
  1913. break;
  1914. default:
  1915. cmd->scsi_done(cmd);
  1916. }
  1917. }
  1918. static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
  1919. void (* done)(struct scsi_cmnd *))
  1920. {
  1921. struct Scsi_Host *host = cmd->device->host;
  1922. struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
  1923. struct CommandControlBlock *ccb;
  1924. int target = cmd->device->id;
  1925. int lun = cmd->device->lun;
  1926. uint8_t scsicmd = cmd->cmnd[0];
  1927. cmd->scsi_done = done;
  1928. cmd->host_scribble = NULL;
  1929. cmd->result = 0;
  1930. if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
  1931. if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
  1932. cmd->result = (DID_NO_CONNECT << 16);
  1933. }
  1934. cmd->scsi_done(cmd);
  1935. return 0;
  1936. }
  1937. if (target == 16) {
  1938. /* virtual device for iop message transfer */
  1939. arcmsr_handle_virtual_command(acb, cmd);
  1940. return 0;
  1941. }
  1942. if (atomic_read(&acb->ccboutstandingcount) >=
  1943. ARCMSR_MAX_OUTSTANDING_CMD)
  1944. return SCSI_MLQUEUE_HOST_BUSY;
  1945. ccb = arcmsr_get_freeccb(acb);
  1946. if (!ccb)
  1947. return SCSI_MLQUEUE_HOST_BUSY;
  1948. if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
  1949. cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
  1950. cmd->scsi_done(cmd);
  1951. return 0;
  1952. }
  1953. arcmsr_post_ccb(acb, ccb);
  1954. return 0;
  1955. }
/* DEF_SCSI_QCMD wraps arcmsr_queue_command_lck() with the host-lock
 * handling required by the SCSI mid-layer, producing the
 * arcmsr_queue_command() entry point. */
static DEF_SCSI_QCMD(arcmsr_queue_command)
  1957. static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
  1958. {
  1959. struct MessageUnit_A __iomem *reg = acb->pmuA;
  1960. char *acb_firm_model = acb->firm_model;
  1961. char *acb_firm_version = acb->firm_version;
  1962. char *acb_device_map = acb->device_map;
  1963. char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
  1964. char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
  1965. char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
  1966. int count;
  1967. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  1968. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  1969. printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
  1970. miscellaneous data' timeout \n", acb->host->host_no);
  1971. return false;
  1972. }
  1973. count = 8;
  1974. while (count){
  1975. *acb_firm_model = readb(iop_firm_model);
  1976. acb_firm_model++;
  1977. iop_firm_model++;
  1978. count--;
  1979. }
  1980. count = 16;
  1981. while (count){
  1982. *acb_firm_version = readb(iop_firm_version);
  1983. acb_firm_version++;
  1984. iop_firm_version++;
  1985. count--;
  1986. }
  1987. count=16;
  1988. while(count){
  1989. *acb_device_map = readb(iop_device_map);
  1990. acb_device_map++;
  1991. iop_device_map++;
  1992. count--;
  1993. }
  1994. printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
  1995. acb->host->host_no,
  1996. acb->firm_version,
  1997. acb->firm_model);
  1998. acb->signature = readl(&reg->message_rwbuffer[0]);
  1999. acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
  2000. acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
  2001. acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
  2002. acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
  2003. acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
  2004. return true;
  2005. }
/*
 * Query a type-B (HBB) adapter for its firmware configuration.
 *
 * Allocates the MessageUnit_B descriptor from DMA-coherent memory,
 * points its doorbell windows into BAR mapping mem_base0 and its
 * message buffers into mem_base1, then issues GET_CONFIG and copies
 * the model / version / device-map strings plus the numeric firmware
 * parameters out of message_rwbuffer.
 *
 * Returns true on success, false on allocation failure or timeout.
 */
static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model;
	/*firm_model,15,60-67*/
	char __iomem *iop_firm_version;
	/*firm_version,17,68-83*/
	char __iomem *iop_device_map;
	/*firm_version,21,84-99*/
	int count;
	/* the MessageUnit_B bookkeeping block itself lives in DMA memory */
	dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
		return false;
	}
	acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
	reg = (struct MessageUnit_B *)dma_coherent;
	acb->pmuB = reg;
	/* doorbell registers map through mem_base0, message buffers through mem_base1 */
	reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
	reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
	reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
	reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
	reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
	reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
	reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
	iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
	iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		/* NOTE(review): the dma_coherent block allocated above is not
		 * freed on this failure path — confirm the caller's teardown
		 * releases acb->pmuB, otherwise this leaks the MessageUnit_B. */
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count){
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count){
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	count = 16;
	while(count){
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		acb->host->host_no,
		acb->firm_version,
		acb->firm_model);
	acb->signature = readl(&reg->message_rwbuffer[1]);
	/*firm_signature,1,00-03*/
	acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
	/*firm_request_len,1,04-07*/
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
	/*firm_numbers_queue,2,08-11*/
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
	/*firm_sdram_size,3,12-15*/
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
	/*firm_ide_channels,4,16-19*/
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
	/*firm_ide_channels,4,16-19*/
	return true;
}
/*
 * Query a type-C (HBC) adapter for its firmware configuration.
 *
 * Masks all outbound interrupts, waits for the firmware-ready flag,
 * posts GET_CONFIG, polls (up to ~20 ms) for the message-done doorbell,
 * then copies the model / version strings and numeric parameters from
 * msgcode_rwbuffer.  Interrupts are re-enabled later in arcmsr_iop_init.
 *
 * Returns true on success, false when the message-done poll times out.
 */
static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org, Index, firmware_state = 0;
	struct MessageUnit_C *reg = pACB->pmuC;
	char *acb_firm_model = pACB->firm_model;
	char *acb_firm_version = pACB->firm_version;
	char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
	char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
	int count;
	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	/* NOTE(review): this poll has no timeout; a dead controller would
	 * hang the probe here forever — consider bounding the loop. */
	do {
		firmware_state = readl(&reg->outbound_msgaddr1);
	} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	for (Index = 0; Index < 2000; Index++) {
		if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
			break;
		}
		udelay(10);
	} /*max 1 seconds*/
	if (Index >= 2000) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
		return false;
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		pACB->host->host_no,
		pACB->firm_version,
		pACB->firm_model);
	pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
	pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
	pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
	pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
	pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
	/*all interrupt service will be enable at arcmsr_iop_init*/
	return true;
}
  2143. static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
  2144. {
  2145. if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
  2146. return arcmsr_get_hba_config(acb);
  2147. else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
  2148. return arcmsr_get_hbb_config(acb);
  2149. else
  2150. return arcmsr_get_hbc_config(acb);
  2151. }
/*
 * arcmsr_polling_hba_ccbdone - drain a type-A adapter's outbound post queue
 * by polling (used on abort/error-handling paths where interrupts may be
 * masked) until @poll_ccb is seen to complete.
 *
 * Returns SUCCESS once @poll_ccb has been reaped, or FAILED after more than
 * 100 polling rounds (25 ms sleep each, ~2.5 s) without it appearing.
 */
static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
polling_hba_ccb_retry:
	poll_count++;
	/* acknowledge any pending outbound interrupts before draining the queue */
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		/* 0xFFFFFFFF from the queue port means the post queue is empty */
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		/* flag_ccb carries (CDB bus address >> 5); convert back to a
		 * driver virtual address via the cached phys->virt offset */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		/* a CCB not in START state here was either aborted or is stale */
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* type-A reply flags report errors in MODE0 bit layout */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
/*
 * arcmsr_polling_hbb_ccbdone - poll a type-B adapter's host-resident done
 * queue (a ring indexed by reg->doneq_index) until @poll_ccb completes.
 *
 * Returns SUCCESS once @poll_ccb has been reaped, or FAILED after more than
 * 100 polling rounds (25 ms sleep each) without it appearing.
 */
static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		/* a zero entry means the done ring has no completion at this slot */
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		/* consume the slot and advance the ring index */
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		/* flag_ccb carries (CDB bus address >> 5); convert to virtual */
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		/* a CCB not in START state here was either aborted or is stale */
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* type-B reply flags report errors in MODE0 bit layout */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
/*
 * arcmsr_polling_hbc_ccbdone - poll a type-C adapter's outbound post queue
 * until @poll_ccb completes.  Queue occupancy is signalled via the
 * OUTBOUND_POSTQUEUE bit of host_int_status; entries are read from
 * outbound_queueport_low (the low 32 bits of the CDB bus address, with the
 * error flag in the low nibble).
 *
 * Returns SUCCESS once @poll_ccb has been reaped, or FAILED after more than
 * 100 polling rounds (25 ms sleep each) without it appearing.
 *
 * NOTE(review): unlike the A/B variants, the abort branch here does not
 * also match (pCCB == poll_ccb) — confirm this asymmetry is intentional.
 */
static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
	uint32_t flag_ccb, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;
polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		/* no POSTQUEUE interrupt pending -> the outbound queue is empty */
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		/* mask off the low status nibble to recover the CDB bus address */
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
		/* check ifcommand done with no error*/
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, pCCB->pcmd->device->lun
					, pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		/* type-C reply flags report errors in MODE1 bit layout */
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
  2325. static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
  2326. struct CommandControlBlock *poll_ccb)
  2327. {
  2328. int rtn = 0;
  2329. switch (acb->adapter_type) {
  2330. case ACB_ADAPTER_TYPE_A: {
  2331. rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
  2332. }
  2333. break;
  2334. case ACB_ADAPTER_TYPE_B: {
  2335. rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
  2336. }
  2337. break;
  2338. case ACB_ADAPTER_TYPE_C: {
  2339. rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
  2340. }
  2341. }
  2342. return rtn;
  2343. }
/*
 * arcmsr_iop_confirm - tell the IOP where the driver's DMA structures live.
 *
 * For type A/C this only matters when the coherent CCB pool sits above
 * 4 GB (non-zero high 32 bits); for type B the host-resident post/done
 * queue window is always programmed.  Returns 0 on success, 1 when the
 * firmware fails to acknowledge a "set config" message.
 */
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
	cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	/*
	***********************************************************************
	** if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		/* only needed when the CCB pool crosses the 4 GB boundary */
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			uint32_t intmask_org;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
				&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
				&reg->inbound_msgaddr0);
			if (!arcmsr_hba_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
part physical address timeout\n",
					acb->host->host_no);
				return 1;
			}
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		unsigned long post_queue_phyaddr;
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t intmask_org;
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* reset both ring indices before handing the window to the IOP */
		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
				acb->host->host_no);
			return 1;
		}
		post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4 */
		writel(post_queue_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4 */
		writel(post_queue_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4]*/
		writel(1056, rwbuffer);
		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
timeout \n",acb->host->host_no);
			return 1;
		}
		arcmsr_hbb_enable_driver_mode(acb);
		arcmsr_enable_outbound_ints(acb, intmask_org);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		/* only needed when the CCB pool crosses the 4 GB boundary */
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
			printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
				acb->adapter_index, cdb_phyaddr_hi32);
			writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
timeout \n", acb->host->host_no);
				return 1;
			}
		}
		}
	}
	return 0;
}
/*
 * arcmsr_wait_firmware_ready - spin until the adapter firmware reports
 * "firmware ok" in its variant-specific status register.  For type B the
 * handshake is completed with an end-of-interrupt doorbell write.
 *
 * NOTE(review): these are unbounded busy-wait loops — if the firmware
 * never sets the ready bit this never returns; confirm it is only called
 * on controlled init/reset paths.
 */
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		do {
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		/* acknowledge the doorbell so the IOP can proceed */
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
		}
	}
}
  2464. static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
  2465. {
  2466. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2467. if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
  2468. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2469. return;
  2470. } else {
  2471. acb->fw_flag = FW_NORMAL;
  2472. if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
  2473. atomic_set(&acb->rq_map_token, 16);
  2474. }
  2475. atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
  2476. if (atomic_dec_and_test(&acb->rq_map_token)) {
  2477. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2478. return;
  2479. }
  2480. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  2481. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2482. }
  2483. return;
  2484. }
  2485. static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
  2486. {
  2487. struct MessageUnit_B __iomem *reg = acb->pmuB;
  2488. if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
  2489. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2490. return;
  2491. } else {
  2492. acb->fw_flag = FW_NORMAL;
  2493. if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
  2494. atomic_set(&acb->rq_map_token, 16);
  2495. }
  2496. atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
  2497. if (atomic_dec_and_test(&acb->rq_map_token)) {
  2498. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2499. return;
  2500. }
  2501. writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
  2502. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2503. }
  2504. return;
  2505. }
  2506. static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
  2507. {
  2508. struct MessageUnit_C __iomem *reg = acb->pmuC;
  2509. if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
  2510. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2511. return;
  2512. } else {
  2513. acb->fw_flag = FW_NORMAL;
  2514. if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
  2515. atomic_set(&acb->rq_map_token, 16);
  2516. }
  2517. atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
  2518. if (atomic_dec_and_test(&acb->rq_map_token)) {
  2519. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2520. return;
  2521. }
  2522. writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
  2523. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
  2524. mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
  2525. }
  2526. return;
  2527. }
  2528. static void arcmsr_request_device_map(unsigned long pacb)
  2529. {
  2530. struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
  2531. switch (acb->adapter_type) {
  2532. case ACB_ADAPTER_TYPE_A: {
  2533. arcmsr_request_hba_device_map(acb);
  2534. }
  2535. break;
  2536. case ACB_ADAPTER_TYPE_B: {
  2537. arcmsr_request_hbb_device_map(acb);
  2538. }
  2539. break;
  2540. case ACB_ADAPTER_TYPE_C: {
  2541. arcmsr_request_hbc_device_map(acb);
  2542. }
  2543. }
  2544. }
  2545. static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
  2546. {
  2547. struct MessageUnit_A __iomem *reg = acb->pmuA;
  2548. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  2549. writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
  2550. if (!arcmsr_hba_wait_msgint_ready(acb)) {
  2551. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2552. rebulid' timeout \n", acb->host->host_no);
  2553. }
  2554. }
  2555. static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
  2556. {
  2557. struct MessageUnit_B *reg = acb->pmuB;
  2558. acb->acb_flags |= ACB_F_MSG_START_BGRB;
  2559. writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
  2560. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  2561. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2562. rebulid' timeout \n",acb->host->host_no);
  2563. }
  2564. }
  2565. static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
  2566. {
  2567. struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
  2568. pACB->acb_flags |= ACB_F_MSG_START_BGRB;
  2569. writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
  2570. writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
  2571. if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
  2572. printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
  2573. rebulid' timeout \n", pACB->host->host_no);
  2574. }
  2575. return;
  2576. }
  2577. static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
  2578. {
  2579. switch (acb->adapter_type) {
  2580. case ACB_ADAPTER_TYPE_A:
  2581. arcmsr_start_hba_bgrb(acb);
  2582. break;
  2583. case ACB_ADAPTER_TYPE_B:
  2584. arcmsr_start_hbb_bgrb(acb);
  2585. break;
  2586. case ACB_ADAPTER_TYPE_C:
  2587. arcmsr_start_hbc_bgrb(acb);
  2588. }
  2589. }
/*
 * arcmsr_clear_doorbell_queue_buffer - flush any stale doorbell state left
 * over from before init/reset: acknowledge pending doorbell interrupts and
 * tell the IOP its outbound data buffer has been consumed, so both sides
 * start from a clean handshake.
 */
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/*clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear interrupt and message state*/
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let IOP know data has been read */
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/* type C acknowledges through a dedicated "clear" register */
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
	}
}
  2621. static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
  2622. {
  2623. switch (acb->adapter_type) {
  2624. case ACB_ADAPTER_TYPE_A:
  2625. return;
  2626. case ACB_ADAPTER_TYPE_B:
  2627. {
  2628. struct MessageUnit_B *reg = acb->pmuB;
  2629. writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
  2630. if (!arcmsr_hbb_wait_msgint_ready(acb)) {
  2631. printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
  2632. return;
  2633. }
  2634. }
  2635. break;
  2636. case ACB_ADAPTER_TYPE_C:
  2637. return;
  2638. }
  2639. return;
  2640. }
  2641. static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
  2642. {
  2643. uint8_t value[64];
  2644. int i, count = 0;
  2645. struct MessageUnit_A __iomem *pmuA = acb->pmuA;
  2646. struct MessageUnit_C __iomem *pmuC = acb->pmuC;
  2647. u32 temp = 0;
  2648. /* backup pci config data */
  2649. printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
  2650. for (i = 0; i < 64; i++) {
  2651. pci_read_config_byte(acb->pdev, i, &value[i]);
  2652. }
  2653. /* hardware reset signal */
  2654. if ((acb->dev_id == 0x1680)) {
  2655. writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
  2656. } else if ((acb->dev_id == 0x1880)) {
  2657. do {
  2658. count++;
  2659. writel(0xF, &pmuC->write_sequence);
  2660. writel(0x4, &pmuC->write_sequence);
  2661. writel(0xB, &pmuC->write_sequence);
  2662. writel(0x2, &pmuC->write_sequence);
  2663. writel(0x7, &pmuC->write_sequence);
  2664. writel(0xD, &pmuC->write_sequence);
  2665. } while ((((temp = readl(&pmuC->host_diagnostic)) | ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
  2666. writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
  2667. } else {
  2668. pci_write_config_byte(acb->pdev, 0x84, 0x20);
  2669. }
  2670. msleep(2000);
  2671. /* write back pci config data */
  2672. for (i = 0; i < 64; i++) {
  2673. pci_write_config_byte(acb->pdev, i, value[i]);
  2674. }
  2675. msleep(1000);
  2676. return;
  2677. }
/*
 * arcmsr_iop_init - bring the IOP firmware interface up: wait for firmware
 * ready, confirm DMA addressing/queue windows, start background rebuild,
 * flush stale doorbell state, enable EOI mode (type B), then re-enable
 * the outbound interrupts that were masked for the duration.
 */
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
/*
 * arcmsr_iop_reset - abort everything the IOP still owns: ask the firmware
 * to abort all commands, drain the posted queues, then complete and
 * recycle every CCB still marked in-flight.  No-op when nothing is
 * outstanding.  Returns the value from arcmsr_abort_allcmd() (0 when
 * nothing was outstanding).
 */
static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i = 0;
	unsigned long flags;
	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		/* return every still-in-flight CCB to the free list */
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				ccb->ccb_flags = 0;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
		return rtnval;
	}
	return rtnval;
}
/*
 * arcmsr_bus_reset - SCSI EH bus-reset handler.
 *
 * Tries a soft IOP reset first (arcmsr_iop_reset); for type A/C adapters,
 * if commands are still outstanding after that, falls back to a full
 * hardware reset, waits for firmware to come back (bounded by
 * ARCMSR_RETRYCOUNT sleeps of ARCMSR_SLEEPTIME), and re-initialises the
 * firmware interface.  Returns SUCCESS or FAILED per SCSI EH convention.
 */
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	uint32_t intmask_org, outbound_doorbell;
	int retry_count = 0;
	int rtn = FAILED;
	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;
	switch(acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		/* another EH thread is already resetting: wait for it instead */
		if (acb->acb_flags & ACB_F_BUS_RESET){
			long timeout;
			printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
			timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
			if (timeout) {
				return SUCCESS;
			}
		}
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			/* soft reset did not take: hard-reset the chip and
			 * poll for the firmware-ok bit to reappear */
			struct MessageUnit_A __iomem *reg;
			reg = acb->pmuA;
			arcmsr_hardware_reset(acb);
			acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep_again:
			ssleep(ARCMSR_SLEEPTIME);
			if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
				if (retry_count > ARCMSR_RETRYCOUNT) {
					acb->fw_flag = FW_DEADLOCK;
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
					return FAILED;
				}
				retry_count++;
				goto sleep_again;
			}
			/* firmware is back: re-run the init sequence */
			acb->acb_flags |= ACB_F_IOP_INITED;
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb);
			arcmsr_start_adapter_bgrb(acb);
			/* clear Qbuffer if door bell ringed */
			outbound_doorbell = readl(&reg->outbound_doorbell);
			writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
			writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
			/* enable outbound Post Queue,outbound doorbell Interrupt */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = SUCCESS;
			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
		} else {
			/* soft reset succeeded: restart the device-map poll */
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
			rtn = SUCCESS;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		/* type B has no hardware-reset fallback path */
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = FAILED;
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			rtn = SUCCESS;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		/* another EH thread is already resetting: wait for it instead */
		if (acb->acb_flags & ACB_F_BUS_RESET) {
			long timeout;
			printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n");
			timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
			if (timeout) {
				return SUCCESS;
			}
		}
		acb->acb_flags |= ACB_F_BUS_RESET;
		if (!arcmsr_iop_reset(acb)) {
			struct MessageUnit_C __iomem *reg;
			reg = acb->pmuC;
			arcmsr_hardware_reset(acb);
			acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep:
			ssleep(ARCMSR_SLEEPTIME);
			/* host_diagnostic bit 2 stays set while reset is in progress */
			if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
				if (retry_count > ARCMSR_RETRYCOUNT) {
					acb->fw_flag = FW_DEADLOCK;
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
					return FAILED;
				}
				retry_count++;
				goto sleep;
			}
			/* firmware is back: re-run the init sequence */
			acb->acb_flags |= ACB_F_IOP_INITED;
			/* disable all outbound interrupt */
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_get_firmware_spec(acb);
			arcmsr_start_adapter_bgrb(acb);
			/* clear Qbuffer if door bell ringed */
			outbound_doorbell = readl(&reg->outbound_doorbell);
			writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */
			writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
			/* enable outbound Post Queue,outbound doorbell Interrupt */
			arcmsr_enable_outbound_ints(acb, intmask_org);
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			rtn = SUCCESS;
			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
		} else {
			acb->acb_flags &= ~ACB_F_BUS_RESET;
			atomic_set(&acb->rq_map_token, 16);
			atomic_set(&acb->ante_token_value, 16);
			acb->fw_flag = FW_NORMAL;
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
			rtn = SUCCESS;
		}
		break;
	}
	}
	return rtn;
}
  2864. static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
  2865. struct CommandControlBlock *ccb)
  2866. {
  2867. int rtn;
  2868. rtn = arcmsr_polling_ccbdone(acb, ccb);
  2869. return rtn;
  2870. }
  2871. static int arcmsr_abort(struct scsi_cmnd *cmd)
  2872. {
  2873. struct AdapterControlBlock *acb =
  2874. (struct AdapterControlBlock *)cmd->device->host->hostdata;
  2875. int i = 0;
  2876. int rtn = FAILED;
  2877. printk(KERN_NOTICE
  2878. "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
  2879. acb->host->host_no, cmd->device->id, cmd->device->lun);
  2880. acb->acb_flags |= ACB_F_ABORT;
  2881. acb->num_aborts++;
  2882. /*
  2883. ************************************************
  2884. ** the all interrupt service routine is locked
  2885. ** we need to handle it as soon as possible and exit
  2886. ************************************************
  2887. */
  2888. if (!atomic_read(&acb->ccboutstandingcount))
  2889. return rtn;
  2890. for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
  2891. struct CommandControlBlock *ccb = acb->pccb_pool[i];
  2892. if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
  2893. ccb->startdone = ARCMSR_CCB_ABORTED;
  2894. rtn = arcmsr_abort_one_cmd(acb, ccb);
  2895. break;
  2896. }
  2897. }
  2898. acb->acb_flags &= ~ACB_F_ABORT;
  2899. return rtn;
  2900. }
  2901. static const char *arcmsr_info(struct Scsi_Host *host)
  2902. {
  2903. struct AdapterControlBlock *acb =
  2904. (struct AdapterControlBlock *) host->hostdata;
  2905. static char buf[256];
  2906. char *type;
  2907. int raid6 = 1;
  2908. switch (acb->pdev->device) {
  2909. case PCI_DEVICE_ID_ARECA_1110:
  2910. case PCI_DEVICE_ID_ARECA_1200:
  2911. case PCI_DEVICE_ID_ARECA_1202:
  2912. case PCI_DEVICE_ID_ARECA_1210:
  2913. raid6 = 0;
  2914. /*FALLTHRU*/
  2915. case PCI_DEVICE_ID_ARECA_1120:
  2916. case PCI_DEVICE_ID_ARECA_1130:
  2917. case PCI_DEVICE_ID_ARECA_1160:
  2918. case PCI_DEVICE_ID_ARECA_1170:
  2919. case PCI_DEVICE_ID_ARECA_1201:
  2920. case PCI_DEVICE_ID_ARECA_1220:
  2921. case PCI_DEVICE_ID_ARECA_1230:
  2922. case PCI_DEVICE_ID_ARECA_1260:
  2923. case PCI_DEVICE_ID_ARECA_1270:
  2924. case PCI_DEVICE_ID_ARECA_1280:
  2925. type = "SATA";
  2926. break;
  2927. case PCI_DEVICE_ID_ARECA_1380:
  2928. case PCI_DEVICE_ID_ARECA_1381:
  2929. case PCI_DEVICE_ID_ARECA_1680:
  2930. case PCI_DEVICE_ID_ARECA_1681:
  2931. case PCI_DEVICE_ID_ARECA_1880:
  2932. type = "SAS";
  2933. break;
  2934. default:
  2935. type = "X-TYPE";
  2936. break;
  2937. }
  2938. sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
  2939. type, raid6 ? "( RAID6 capable)" : "",
  2940. ARCMSR_DRIVER_VERSION);
  2941. return buf;
  2942. }