/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *   linit.c
 *
 * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
 */

#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/kthread.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>

#include "aacraid.h"

#define AAC_DRIVER_VERSION "1.2-1"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVERNAME "aacraid"

#ifdef AAC_DRIVER_BUILD
#define _str(x) #x
#define str(x) _str(x)
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
#else
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
#endif

MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
		   "Adaptec Advanced Raid Products, "
		   "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_FULL_VERSION);

static DEFINE_MUTEX(aac_mutex);
static LIST_HEAD(aac_devices);
static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;

/*
 * Because of the way Linux names scsi devices, the order in this table has
 * become important. Check for on-board Raid first, add-in cards second.
 *
 * Note: The last field is used to index into aac_drivers below.
 */
static const struct pci_device_id aac_pci_tbl[] = {
	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
	{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
	{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
	{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
	{ 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
	{ 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);

/*
 * dmb - For now we add the number of channels to this structure.
 * In the future we should add a fib that reports the number of channels
 * for the card. At that time we can remove the channels from here
 */
static struct aac_driver_ident aac_drivers[] = {
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */
	{ aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
	{ aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
	{ NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */
	{ aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
	{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
	{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */
	{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
	{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
	{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
	{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
};
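
/*
 * The driver_data field (the last element) of each aac_pci_tbl[] entry is
 * an index into aac_drivers[] above. For example, the PCI table entry
 * { 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 } (Perc 320/DC) selects
 * aac_drivers[52], which binds that board to aac_rx_init() with the
 * AAC_QUIRK_31BIT | AAC_QUIRK_34SG quirks and two channels.
 */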

/**
 * aac_queuecommand - queue a SCSI command
 * @cmd: SCSI command to queue
 * @done: Function to call on command completion
 *
 * Queues a command for execution by the associated Host Adapter.
 *
 * TODO: unify with aac_scsi_cmd().
 */
static int aac_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *cmd)
{
	int r = 0;
	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
	r = (aac_scsi_cmd(cmd) ? FAILED : 0);
	return r;
}

/**
 * aac_info - Returns the host adapter name
 * @shost: Scsi host to report on
 *
 * Returns a static string describing the device in question
 */
static const char *aac_info(struct Scsi_Host *shost)
{
	struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
	return aac_drivers[dev->cardtype].name;
}

/**
 * aac_get_driver_ident
 * @devtype: index into lookup table
 *
 * Returns a pointer to the entry in the driver lookup table.
 */
struct aac_driver_ident* aac_get_driver_ident(int devtype)
{
	return &aac_drivers[devtype];
}

/**
 * aac_biosparm - return BIOS parameters for disk
 * @sdev: The scsi device corresponding to the disk
 * @bdev: the block device corresponding to the disk
 * @capacity: the sector capacity of the disk
 * @geom: geometry block to fill in
 *
 * Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
 * The default disk geometry is 64 heads, 32 sectors, and the appropriate
 * number of cylinders so as not to exceed drive capacity. In order for
 * disks equal to or larger than 1 GB to be addressable by the BIOS
 * without exceeding the BIOS limitation of 1024 cylinders, Extended
 * Translation should be enabled. With Extended Translation enabled,
 * drives between 1 GB inclusive and 2 GB exclusive are given a disk
 * geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
 * are given a disk geometry of 255 heads and 63 sectors. However, if
 * the BIOS detects that the Extended Translation setting does not match
 * the geometry in the partition table, then the translation inferred
 * from the partition table will be used by the BIOS, and a warning may
 * be displayed.
 */
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int *geom)
{
	struct diskparm *param = (struct diskparm *)geom;
	unsigned char *buf;

	dprintk((KERN_DEBUG "aac_biosparm.\n"));

	/*
	 * Assuming extended translation is enabled - #REVISIT#
	 */
	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
		if (capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
			param->heads = 255;
			param->sectors = 63;
		} else {
			param->heads = 128;
			param->sectors = 32;
		}
	} else {
		param->heads = 64;
		param->sectors = 32;
	}

	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);

	/*
	 * Read the first 1024 bytes from the disk device, if the boot
	 * sector partition table is valid, search for a partition table
	 * entry whose end_head matches one of the standard geometry
	 * translations ( 64/32, 128/32, 255/63 ).
	 */
	buf = scsi_bios_ptable(bdev);
	if (!buf)
		return 0;
	if (*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
		struct partition *first = (struct partition *)buf;
		struct partition *entry = first;
		int saved_cylinders = param->cylinders;
		int num;
		unsigned char end_head, end_sec;

		for (num = 0; num < 4; num++) {
			end_head = entry->end_head;
			end_sec = entry->end_sector & 0x3f;

			if (end_head == 63) {
				param->heads = 64;
				param->sectors = 32;
				break;
			} else if (end_head == 127) {
				param->heads = 128;
				param->sectors = 32;
				break;
			} else if (end_head == 254) {
				param->heads = 255;
				param->sectors = 63;
				break;
			}
			entry++;
		}

		if (num == 4) {
			end_head = first->end_head;
			end_sec = first->end_sector & 0x3f;
		}

		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
		if (num < 4 && end_sec == param->sectors) {
			if (param->cylinders != saved_cylinders)
				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
					param->heads, param->sectors, num));
		} else if (end_head > 0 || end_sec > 0) {
			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
				end_head + 1, end_sec, num));
			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
				param->heads, param->sectors));
		}
	}
	kfree(buf);
	return 0;
}
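
/*
 * Worked example of the translation above, using illustrative numbers:
 * a 4 GB disk reports 8388608 512-byte sectors, which is >= 4 * 1024 * 1024,
 * so the default geometry becomes 255 heads / 63 sectors and
 * cap_to_cyls(8388608, 255 * 63) yields roughly 522 cylinders. If the
 * on-disk partition table instead ends on head 63 or 127, the geometry is
 * switched back to 64/32 or 128/32 and the cylinder count is recomputed.
 */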

/**
 * aac_slave_configure - compute queue depths
 * @sdev: SCSI device we are considering
 *
 * Selects queue depths for each target device based on the host adapter's
 * total capacity and the queue depth supported by the target device.
 * A queue depth of one automatically disables tagged queueing.
 */
static int aac_slave_configure(struct scsi_device *sdev)
{
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;

	if (aac->jbod && (sdev->type == TYPE_DISK))
		sdev->removable = 1;

	if ((sdev->type == TYPE_DISK) &&
	    (sdev_channel(sdev) != CONTAINER_CHANNEL) &&
	    (!aac->jbod || sdev->inq_periph_qual) &&
	    (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
		if (expose_physicals == 0)
			return -ENXIO;
		if (expose_physicals < 0)
			sdev->no_uld_attach = 1;
	}

	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
	    (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2)) &&
	    !sdev->no_uld_attach) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num_lsu = 0;
		unsigned num_one = 0;
		unsigned depth;
		unsigned cid;

		/*
		 * Firmware has an individual device recovery time typically
		 * of 35 seconds, give us a margin.
		 */
		if (sdev->request_queue->rq_timeout < (45 * HZ))
			blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
			if (aac->fsa_dev[cid].valid)
				++num_lsu;
		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
			    (!aac->raid_scsi_mode ||
			     (sdev_channel(sdev) != 2)) &&
			    !dev->no_uld_attach) {
				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
				    || !aac->fsa_dev[sdev_id(dev)].valid)
					++num_lsu;
			} else
				++num_one;
		}
		if (num_lsu == 0)
			++num_lsu;
		depth = (host->can_queue - num_one) / num_lsu;
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		scsi_change_queue_depth(sdev, depth);
	} else {
		scsi_change_queue_depth(sdev, 1);

		sdev->tagged_supported = 1;
	}

	return 0;
}
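
/*
 * Illustrative example of the depth calculation above (the real values
 * depend on the adapter and the attached devices): with host->can_queue
 * of 512, 60 devices counted in num_one and 4 logical storage units in
 * num_lsu, each LSU is given (512 - 60) / 4 = 113 outstanding commands,
 * which is then clamped to the [2, 256] range before being applied with
 * scsi_change_queue_depth().
 */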

/**
 * aac_change_queue_depth - alter queue depths
 * @sdev: SCSI device we are considering
 * @depth: desired queue depth
 *
 * Alters queue depths for target device based on the host adapter's
 * total capacity and the queue depth supported by the target device.
 */
static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
{
	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num = 0;

		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
			    (sdev_channel(dev) == CONTAINER_CHANNEL))
				++num;
			++num;
		}
		if (num >= host->can_queue)
			num = host->can_queue - 1;
		if (depth > (host->can_queue - num))
			depth = host->can_queue - num;
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		return scsi_change_queue_depth(sdev, depth);
	}

	return scsi_change_queue_depth(sdev, 1);
}
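
/*
 * Note: unlike aac_slave_configure() above, this callback is invoked by the
 * SCSI midlayer when the queue depth is changed at runtime (typically via
 * the sdev's queue_depth sysfs attribute). It only re-balances the depth
 * for array containers on CONTAINER_CHANNEL; every other device is left at
 * a queue depth of 1.
 */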

static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
	if (sdev_channel(sdev) != CONTAINER_CHANNEL)
		return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
			? "Hidden\n" :
			((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
	return snprintf(buf, PAGE_SIZE, "%s\n",
		get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
}

static struct device_attribute aac_raid_level_attr = {
	.attr = {
		.name = "level",
		.mode = S_IRUGO,
	},
	.show = aac_show_raid_level
};

static struct device_attribute *aac_dev_attrs[] = {
	&aac_raid_level_attr,
	NULL,
};

static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
{
	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return aac_do_ioctl(dev, cmd, arg);
}

static int aac_eh_abort(struct scsi_cmnd* cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	int count;
	int ret = FAILED;

	printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n",
		AAC_DRIVERNAME,
		host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
	switch (cmd->cmnd[0]) {
	case SERVICE_ACTION_IN_16:
		if (!(aac->raw_io_interface) ||
		    !(aac->raw_io_64) ||
		    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
			break;
	case INQUIRY:
	case READ_CAPACITY:
		/* Mark associated FIB to not complete, eh handler does this */
		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
			struct fib * fib = &aac->fibs[count];
			if (fib->hw_fib_va->header.XferState &&
			    (fib->flags & FIB_CONTEXT_FLAG) &&
			    (fib->callback_data == cmd)) {
				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
				ret = SUCCESS;
			}
		}
		break;
	case TEST_UNIT_READY:
		/* Mark associated FIB to not complete, eh handler does this */
		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
			struct scsi_cmnd * command;
			struct fib * fib = &aac->fibs[count];
			if ((fib->hw_fib_va->header.XferState & cpu_to_le32(Async | NoResponseExpected)) &&
			    (fib->flags & FIB_CONTEXT_FLAG) &&
			    ((command = fib->callback_data)) &&
			    (command->device == cmd->device)) {
				fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
				command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
				if (command == cmd)
					ret = SUCCESS;
			}
		}
	}
	return ret;
}

/*
 * aac_eh_reset - Reset command handling
 * @scsi_cmd: SCSI command block causing the reset
 *
 */
static int aac_eh_reset(struct scsi_cmnd* cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct scsi_cmnd * command;
	int count;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	unsigned long flags;

	/* Mark the associated FIB to not complete, eh handler does this */
	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
		struct fib * fib = &aac->fibs[count];
		if (fib->hw_fib_va->header.XferState &&
		    (fib->flags & FIB_CONTEXT_FLAG) &&
		    (fib->callback_data == cmd)) {
			fib->flags |= FIB_CONTEXT_FLAG_TIMED_OUT;
			cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		}
	}
	printk(KERN_ERR "%s: Host adapter reset request. SCSI hang ?\n",
		AAC_DRIVERNAME);

	if ((count = aac_check_health(aac)))
		return count;
	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds).
	 */
	for (count = 60; count; --count) {
		int active = aac->in_reset;

		if (active == 0)
		__shost_for_each_device(dev, host) {
			spin_lock_irqsave(&dev->list_lock, flags);
			list_for_each_entry(command, &dev->cmd_list, list) {
				if ((command != cmd) &&
				    (command->SCp.phase == AAC_OWNER_FIRMWARE)) {
					active++;
					break;
				}
			}
			spin_unlock_irqrestore(&dev->list_lock, flags);
			if (active)
				break;
		}
		/*
		 * We can exit If all the commands are complete
		 */
		if (active == 0)
			return SUCCESS;
		ssleep(1);
	}
	printk(KERN_ERR "%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
	/*
	 * This adapter needs a blind reset, only do so for Adapters that
	 * support a register, instead of a commanded, reset.
	 */
	if (((aac->supplement_adapter_info.SupportedOptions2 &
	      AAC_OPTION_MU_RESET) ||
	     (aac->supplement_adapter_info.SupportedOptions2 &
	      AAC_OPTION_DOORBELL_RESET)) &&
	    aac_check_reset &&
	    ((aac_check_reset != 1) ||
	     !(aac->supplement_adapter_info.SupportedOptions2 &
	       AAC_OPTION_IGNORE_RESET)))
		aac_reset_adapter(aac, 2); /* Bypass wait for command quiesce */
	return SUCCESS; /* Cause an immediate retry of the command with a ten second delay after successful tur */
}
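
/*
 * Note on the reset path above: the blind (register level) reset is only
 * issued when the firmware advertises AAC_OPTION_MU_RESET or
 * AAC_OPTION_DOORBELL_RESET and the aac_check_reset setting permits it
 * (AAC_OPTION_IGNORE_RESET is honoured when aac_check_reset == 1). The
 * handler then reports SUCCESS so that the midlayer retries the
 * outstanding command.
 */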

/**
 * aac_cfg_open - open a configuration file
 * @inode: inode being opened
 * @file: file handle attached
 *
 * Called when the configuration device is opened. Does the needed
 * set up on the handle and then returns
 *
 * Bugs: This needs extending to check a given adapter is present
 * so we can support hot plugging, and to ref count adapters.
 */
static int aac_cfg_open(struct inode *inode, struct file *file)
{
	struct aac_dev *aac;
	unsigned minor_number = iminor(inode);
	int err = -ENODEV;

	mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */
	list_for_each_entry(aac, &aac_devices, entry) {
		if (aac->id == minor_number) {
			file->private_data = aac;
			err = 0;
			break;
		}
	}
	mutex_unlock(&aac_mutex);

	return err;
}

/**
 * aac_cfg_ioctl - AAC configuration request
 * @inode: inode of device
 * @file: file handle
 * @cmd: ioctl command code
 * @arg: argument
 *
 * Handles a configuration ioctl. Currently this involves wrapping it
 * up and feeding it into the nasty windowsalike glue layer.
 *
 * Bugs: Needs locking against parallel ioctls lower down
 * Bugs: Needs to handle hot plugging
 */
static long aac_cfg_ioctl(struct file *file,
		unsigned int cmd, unsigned long arg)
{
	struct aac_dev *aac = (struct aac_dev *)file->private_data;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	return aac_do_ioctl(aac, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
	long ret;
	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
	case FSACTL_SENDFIB:
	case FSACTL_OPEN_GET_ADAPTER_FIB:
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
	case FSACTL_SEND_RAW_SRB:
	case FSACTL_GET_PCI_INFO:
	case FSACTL_QUERY_DISK:
	case FSACTL_DELETE_DISK:
	case FSACTL_FORCE_DELETE_DISK:
	case FSACTL_GET_CONTAINERS:
	case FSACTL_SEND_LARGE_FIB:
		ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
		break;

	case FSACTL_GET_NEXT_ADAPTER_FIB: {
		struct fib_ioctl __user *f;

		f = compat_alloc_user_space(sizeof(*f));
		ret = 0;
		if (clear_user(f, sizeof(*f)))
			ret = -EFAULT;
		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
			ret = -EFAULT;
		if (!ret)
			ret = aac_do_ioctl(dev, cmd, f);
		break;
	}

	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}

static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
}

static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return aac_compat_do_ioctl(file->private_data, cmd, arg);
}
#endif

static ssize_t aac_show_model(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len;

	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
		char * cp = dev->supplement_adapter_info.AdapterTypeText;
		while (*cp && *cp != ' ')
			++cp;
		while (*cp == ' ')
			++cp;
		len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
	} else
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			aac_drivers[dev->cardtype].model);
	return len;
}

static ssize_t aac_show_vendor(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len;

	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
		char * cp = dev->supplement_adapter_info.AdapterTypeText;
		while (*cp && *cp != ' ')
			++cp;
		len = snprintf(buf, PAGE_SIZE, "%.*s\n",
			(int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText),
			dev->supplement_adapter_info.AdapterTypeText);
	} else
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			aac_drivers[dev->cardtype].vname);
	return len;
}

static ssize_t aac_show_flags(struct device *cdev,
			      struct device_attribute *attr, char *buf)
{
	int len = 0;
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;

	if (nblank(dprintk(x)))
		len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
	len += snprintf(buf + len, PAGE_SIZE - len,
			"AAC_DETAILED_STATUS_INFO\n");
#endif
	if (dev->raw_io_interface && dev->raw_io_64)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SAI_READ_CAPACITY_16\n");
	if (dev->jbod)
		len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
	if (dev->supplement_adapter_info.SupportedOptions2 &
		AAC_OPTION_POWER_MANAGEMENT)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SUPPORTED_POWER_MANAGEMENT\n");
	if (dev->msi)
		len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
	return len;
}

static ssize_t aac_show_kernel_version(struct device *device,
				       struct device_attribute *attr,
				       char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len, tmp;

	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
		tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
		le32_to_cpu(dev->adapter_info.kernelbuild));
	return len;
}

static ssize_t aac_show_monitor_version(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len, tmp;

	tmp = le32_to_cpu(dev->adapter_info.monitorrev);
	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
		tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
		le32_to_cpu(dev->adapter_info.monitorbuild));
	return len;
}

static ssize_t aac_show_bios_version(struct device *device,
				     struct device_attribute *attr,
				     char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len, tmp;

	tmp = le32_to_cpu(dev->adapter_info.biosrev);
	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
		tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
		le32_to_cpu(dev->adapter_info.biosbuild));
	return len;
}

static ssize_t aac_show_serial_number(struct device *device,
				      struct device_attribute *attr, char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len = 0;

	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
		len = snprintf(buf, 16, "%06X\n",
			le32_to_cpu(dev->adapter_info.serial[0]));
	if (len &&
	    !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
		sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
		buf, len-1))
		len = snprintf(buf, 16, "%.*s\n",
			(int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
			dev->supplement_adapter_info.MfgPcbaSerialNo);

	return min(len, 16);
}

static ssize_t aac_show_max_channel(struct device *device,
				    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		class_to_shost(device)->max_channel);
}

static ssize_t aac_show_max_id(struct device *device,
			       struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
		class_to_shost(device)->max_id);
}

static ssize_t aac_store_reset_adapter(struct device *device,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int retval = -EACCES;

	if (!capable(CAP_SYS_ADMIN))
		return retval;
	retval = aac_reset_adapter((struct aac_dev*)class_to_shost(device)->hostdata, buf[0] == '!');
	if (retval >= 0)
		retval = count;
	return retval;
}

static ssize_t aac_show_reset_adapter(struct device *device,
				      struct device_attribute *attr,
				      char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len, tmp;

	tmp = aac_adapter_check_health(dev);
	if ((tmp == 0) && dev->in_reset)
		tmp = -EBUSY;
	len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
	return len;
}

static struct device_attribute aac_model = {
	.attr = {
		.name = "model",
		.mode = S_IRUGO,
	},
	.show = aac_show_model,
};
static struct device_attribute aac_vendor = {
	.attr = {
		.name = "vendor",
		.mode = S_IRUGO,
	},
	.show = aac_show_vendor,
};
static struct device_attribute aac_flags = {
	.attr = {
		.name = "flags",
		.mode = S_IRUGO,
	},
	.show = aac_show_flags,
};
static struct device_attribute aac_kernel_version = {
	.attr = {
		.name = "hba_kernel_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_kernel_version,
};
static struct device_attribute aac_monitor_version = {
	.attr = {
		.name = "hba_monitor_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_monitor_version,
};
static struct device_attribute aac_bios_version = {
	.attr = {
		.name = "hba_bios_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_bios_version,
};
static struct device_attribute aac_serial_number = {
	.attr = {
		.name = "serial_number",
		.mode = S_IRUGO,
	},
	.show = aac_show_serial_number,
};
static struct device_attribute aac_max_channel = {
	.attr = {
		.name = "max_channel",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_channel,
};
static struct device_attribute aac_max_id = {
	.attr = {
		.name = "max_id",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_id,
};
static struct device_attribute aac_reset = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR|S_IRUGO,
	},
	.store = aac_store_reset_adapter,
	.show = aac_show_reset_adapter,
};

static struct device_attribute *aac_attrs[] = {
	&aac_model,
	&aac_vendor,
	&aac_flags,
	&aac_kernel_version,
	&aac_monitor_version,
	&aac_bios_version,
	&aac_serial_number,
	&aac_max_channel,
	&aac_max_id,
	&aac_reset,
	NULL
};

ssize_t aac_get_serial_number(struct device *device, char *buf)
{
	return aac_show_serial_number(device, &aac_serial_number, buf);
}

static const struct file_operations aac_cfg_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = aac_compat_cfg_ioctl,
#endif
	.open = aac_cfg_open,
	.llseek = noop_llseek,
};

static struct scsi_host_template aac_driver_template = {
	.module = THIS_MODULE,
	.name = "AAC",
	.proc_name = AAC_DRIVERNAME,
	.info = aac_info,
	.ioctl = aac_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = aac_compat_ioctl,
#endif
	.queuecommand = aac_queuecommand,
	.bios_param = aac_biosparm,
	.shost_attrs = aac_attrs,
	.slave_configure = aac_slave_configure,
	.change_queue_depth = aac_change_queue_depth,
	.sdev_attrs = aac_dev_attrs,
	.eh_abort_handler = aac_eh_abort,
	.eh_host_reset_handler = aac_eh_reset,
	.can_queue = AAC_NUM_IO_FIB,
	.this_id = MAXIMUM_NUM_CONTAINERS,
	.sg_tablesize = 16,
	.max_sectors = 128,
#if (AAC_NUM_IO_FIB > 256)
	.cmd_per_lun = 256,
#else
	.cmd_per_lun = AAC_NUM_IO_FIB,
#endif
	.use_clustering = ENABLE_CLUSTERING,
	.emulated = 1,
	.no_write_same = 1,
};
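
/*
 * The limits in aac_driver_template are conservative defaults: sg_tablesize
 * and max_sectors are adjusted per adapter in aac_probe_one() below (for
 * example the AAC_QUIRK_34SG and AAC_QUIRK_17SG quirks shrink sg_tablesize
 * and recompute max_sectors as sg_tablesize * 8 + 112), and per-device
 * queue depths are chosen later in aac_slave_configure().
 */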

static void __aac_shutdown(struct aac_dev * aac)
{
	int i;
	int cpu;

	aac_send_shutdown(aac);
	if (aac->aif_thread) {
		int i;
		/* Clear out events first */
		for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
			struct fib *fib = &aac->fibs[i];
			if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
			    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
				up(&fib->event_wait);
		}
		kthread_stop(aac->thread);
		aac->thread = NULL;
	}
	aac_adapter_disable_int(aac);
	cpu = cpumask_first(cpu_online_mask);
	if (aac->pdev->device == PMC_DEVICE_S6 ||
	    aac->pdev->device == PMC_DEVICE_S7 ||
	    aac->pdev->device == PMC_DEVICE_S8 ||
	    aac->pdev->device == PMC_DEVICE_S9) {
		if (aac->max_msix > 1) {
			for (i = 0; i < aac->max_msix; i++) {
				if (irq_set_affinity_hint(
					aac->msixentry[i].vector,
					NULL)) {
					printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n",
						aac->name,
						aac->id,
						cpu);
				}
				cpu = cpumask_next(cpu,
						cpu_online_mask);
				free_irq(aac->msixentry[i].vector,
					 &(aac->aac_msix[i]));
			}
		} else {
			free_irq(aac->pdev->irq,
				 &(aac->aac_msix[0]));
		}
	} else {
		free_irq(aac->pdev->irq, aac);
	}
	if (aac->msi)
		pci_disable_msi(aac->pdev);
	else if (aac->max_msix > 1)
		pci_disable_msix(aac->pdev);
}
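
/*
 * Common teardown helper (used from the aac_probe_one() error path below):
 * it sends a shutdown request to the firmware, wakes any fibs still waiting
 * for a response so the command thread can be stopped, disables adapter
 * interrupts, frees the MSI-X or legacy IRQ(s) and finally disables
 * MSI/MSI-X on the PCI device.
 */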
  1026. static void aac_init_char(void)
  1027. {
  1028. aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
  1029. if (aac_cfg_major < 0) {
  1030. pr_err("aacraid: unable to register \"aac\" device.\n");
  1031. }
  1032. }
  1033. static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
  1034. {
  1035. unsigned index = id->driver_data;
  1036. struct Scsi_Host *shost;
  1037. struct aac_dev *aac;
  1038. struct list_head *insert = &aac_devices;
  1039. int error = -ENODEV;
  1040. int unique_id = 0;
  1041. u64 dmamask;
  1042. extern int aac_sync_mode;
  1043. /*
  1044. * Only series 7 needs freset.
  1045. */
  1046. if (pdev->device == PMC_DEVICE_S7)
  1047. pdev->needs_freset = 1;
  1048. list_for_each_entry(aac, &aac_devices, entry) {
  1049. if (aac->id > unique_id)
  1050. break;
  1051. insert = &aac->entry;
  1052. unique_id++;
  1053. }
  1054. pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
  1055. PCIE_LINK_STATE_CLKPM);
  1056. error = pci_enable_device(pdev);
  1057. if (error)
  1058. goto out;
  1059. error = -ENODEV;
  1060. /*
  1061. * If the quirk31 bit is set, the adapter needs adapter
  1062. * to driver communication memory to be allocated below 2gig
  1063. */
  1064. if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
  1065. dmamask = DMA_BIT_MASK(31);
  1066. else
  1067. dmamask = DMA_BIT_MASK(32);
  1068. if (pci_set_dma_mask(pdev, dmamask) ||
  1069. pci_set_consistent_dma_mask(pdev, dmamask))
  1070. goto out_disable_pdev;
  1071. pci_set_master(pdev);
  1072. shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
  1073. if (!shost)
  1074. goto out_disable_pdev;
  1075. shost->irq = pdev->irq;
  1076. shost->unique_id = unique_id;
  1077. shost->max_cmd_len = 16;
  1078. shost->use_cmd_list = 1;
  1079. if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
  1080. aac_init_char();
  1081. aac = (struct aac_dev *)shost->hostdata;
  1082. aac->base_start = pci_resource_start(pdev, 0);
  1083. aac->scsi_host_ptr = shost;
  1084. aac->pdev = pdev;
  1085. aac->name = aac_driver_template.name;
  1086. aac->id = shost->unique_id;
  1087. aac->cardtype = index;
  1088. INIT_LIST_HEAD(&aac->entry);
  1089. aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
  1090. if (!aac->fibs)
  1091. goto out_free_host;
  1092. spin_lock_init(&aac->fib_lock);
  1093. mutex_init(&aac->ioctl_mutex);
  1094. /*
  1095. * Map in the registers from the adapter.
  1096. */
  1097. aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
  1098. if ((*aac_drivers[index].init)(aac)) {
  1099. error = -ENODEV;
  1100. goto out_unmap;
  1101. }
  1102. if (aac->sync_mode) {
  1103. if (aac_sync_mode)
  1104. printk(KERN_INFO "%s%d: Sync. mode enforced "
  1105. "by driver parameter. This will cause "
  1106. "a significant performance decrease!\n",
  1107. aac->name,
  1108. aac->id);
  1109. else
  1110. printk(KERN_INFO "%s%d: Async. mode not supported "
  1111. "by current driver, sync. mode enforced."
  1112. "\nPlease update driver to get full performance.\n",
  1113. aac->name,
  1114. aac->id);
  1115. }
  1116. /*
  1117. * Start any kernel threads needed
  1118. */
  1119. aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
  1120. if (IS_ERR(aac->thread)) {
  1121. printk(KERN_ERR "aacraid: Unable to create command thread.\n");
  1122. error = PTR_ERR(aac->thread);
  1123. aac->thread = NULL;
  1124. goto out_deinit;
  1125. }
  1126. /*
  1127. * If we had set a smaller DMA mask earlier, set it to 4gig
  1128. * now since the adapter can dma data to at least a 4gig
  1129. * address space.
  1130. */
  1131. if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
  1132. if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
  1133. goto out_deinit;
  1134. aac->maximum_num_channels = aac_drivers[index].channels;
  1135. error = aac_get_adapter_info(aac);
  1136. if (error < 0)
  1137. goto out_deinit;
  1138. /*
  1139. * Lets override negotiations and drop the maximum SG limit to 34
  1140. */
  1141. if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
  1142. (shost->sg_tablesize > 34)) {
  1143. shost->sg_tablesize = 34;
  1144. shost->max_sectors = (shost->sg_tablesize * 8) + 112;
  1145. }
  1146. if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
  1147. (shost->sg_tablesize > 17)) {
  1148. shost->sg_tablesize = 17;
  1149. shost->max_sectors = (shost->sg_tablesize * 8) + 112;
  1150. }
  1151. error = pci_set_dma_max_seg_size(pdev,
  1152. (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
  1153. (shost->max_sectors << 9) : 65536);
  1154. if (error)
  1155. goto out_deinit;
  1156. /*
  1157. * Firmware printf works only with older firmware.
  1158. */
  1159. if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
  1160. aac->printf_enabled = 1;
  1161. else
  1162. aac->printf_enabled = 0;
  1163. /*
  1164. * max channel will be the physical channels plus 1 virtual channel
  1165. * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
  1166. * physical channels are address by their actual physical number+1
  1167. */
  1168. if (aac->nondasd_support || expose_physicals || aac->jbod)
  1169. shost->max_channel = aac->maximum_num_channels;
  1170. else
  1171. shost->max_channel = 0;
	aac_get_config_status(aac, 0);
	aac_get_containers(aac);
	list_add(&aac->entry, insert);

	shost->max_id = aac->maximum_num_containers;
	if (shost->max_id < aac->maximum_num_physicals)
		shost->max_id = aac->maximum_num_physicals;
	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
		shost->max_id = MAXIMUM_NUM_CONTAINERS;
	else
		shost->this_id = shost->max_id;

	if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
		aac_intr_normal(aac, 0, 2, 0, NULL);

	/*
	 * dmb - we may need to move the setting of these parms somewhere else
	 * once we get a fib that can report the actual numbers
	 */
	shost->max_lun = AAC_MAX_LUN;

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_deinit;
	scsi_scan_host(shost);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);

	return 0;
 out_deinit:
	__aac_shutdown(aac);
 out_unmap:
	aac_fib_map_free(aac);
	if (aac->comm_addr)
		pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
				aac->comm_phys);
	kfree(aac->queues);
	aac_adapter_ioremap(aac, 0);
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
 out_free_host:
	scsi_host_put(shost);
 out_disable_pdev:
	pci_disable_device(pdev);
 out:
	return error;
}
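
/**
 * aac_release_resources - free the adapter's interrupt resources
 * @aac:	adapter being quiesced
 *
 * Disables adapter interrupts, frees the registered IRQ(s) (one per MSI-X
 * vector on PMC SRC family controllers, the single line otherwise) and
 * disables MSI/MSI-X on the PCI device.
 */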
static void aac_release_resources(struct aac_dev *aac)
{
	int i;

	aac_adapter_disable_int(aac);
	if (aac->pdev->device == PMC_DEVICE_S6 ||
	    aac->pdev->device == PMC_DEVICE_S7 ||
	    aac->pdev->device == PMC_DEVICE_S8 ||
	    aac->pdev->device == PMC_DEVICE_S9) {
		if (aac->max_msix > 1) {
			for (i = 0; i < aac->max_msix; i++)
				free_irq(aac->msixentry[i].vector,
					 &(aac->aac_msix[i]));
		} else {
			free_irq(aac->pdev->irq, &(aac->aac_msix[0]));
		}
	} else {
		free_irq(aac->pdev->irq, aac);
	}

	if (aac->msi)
		pci_disable_msi(aac->pdev);
	else if (aac->max_msix > 1)
		pci_disable_msix(aac->pdev);
}
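
/**
 * aac_acquire_resources - bring an adapter's interrupts back up
 * @dev:	adapter being restored
 *
 * Counterpart to aac_release_resources(), used on resume and during PCI
 * error recovery: waits for the firmware to report KERNEL_UP_AND_RUNNING,
 * re-selects the interrupt mode, re-registers the IRQ handler(s),
 * re-assigns vectors to fibs and restarts the adapter.
 */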
static int aac_acquire_resources(struct aac_dev *dev)
{
	int i, j;
	int instance = dev->id;
	const char *name = dev->name;
	unsigned long status;

	/*
	 * First clear out all interrupts.  Then enable the ones that we
	 * can handle.
	 */
	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
		|| status == 0xffffffff)
		msleep(20);

	aac_adapter_disable_int(dev);
	aac_adapter_enable_int(dev);

	if (dev->pdev->device == PMC_DEVICE_S7 ||
	    dev->pdev->device == PMC_DEVICE_S8 ||
	    dev->pdev->device == PMC_DEVICE_S9)
		aac_define_int_mode(dev);

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;

			if (request_irq(dev->msixentry[i].vector,
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
					name, instance, i);
				for (j = 0; j < i; j++)
					free_irq(dev->msixentry[j].vector,
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				goto error_iounmap;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
				IRQF_SHARED, "aacraid",
				&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
				name, instance);
			goto error_iounmap;
		}
	}

	aac_adapter_enable_int(dev);

	/*
	 * max_msix may change after EEH:
	 * re-assign vectors to fibs.
	 */
	aac_fib_vector_assign(dev);

	if (!dev->sync_mode) {
		/*
		 * After EEH recovery or suspend/resume, the max_msix count
		 * may change, therefore update it in init as well.
		 */
		dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
		aac_adapter_start(dev);
	}
	return 0;

error_iounmap:
	return -1;
}

#if (defined(CONFIG_PM))
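/**
 * aac_suspend - PCI power-management suspend callback
 * @pdev:	PCI device being suspended
 * @state:	target power state chosen by the PM core
 *
 * Blocks new SCSI requests, shuts the firmware down cleanly, releases
 * interrupt resources and puts the device into the requested power state.
 */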
static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_block_requests(shost);
	aac_send_shutdown(aac);

	aac_release_resources(aac);

	pci_set_drvdata(pdev, shost);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
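
/**
 * aac_resume - PCI power-management resume callback
 * @pdev:	PCI device being resumed
 *
 * Restores PCI state, re-enables the device and bus mastering, reacquires
 * interrupt resources, clears the shutdown flag set by aac_send_shutdown()
 * and unblocks SCSI requests.
 */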
static int aac_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
	int r;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	r = pci_enable_device(pdev);

	if (r)
		goto fail_device;

	pci_set_master(pdev);
	if (aac_acquire_resources(aac))
		goto fail_device;
	/*
	 * reset this flag to unblock ioctl() as it was set at
	 * aac_send_shutdown() to block ioctls from the upper layer
	 */
	aac->adapter_shutdown = 0;
	scsi_unblock_requests(shost);

	return 0;

fail_device:
	printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	return -ENODEV;
}
#endif
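
/**
 * aac_shutdown - driver shutdown callback
 * @dev:	PCI device being shut down
 *
 * Blocks further SCSI requests and performs an orderly firmware shutdown
 * at system halt/reboot time.
 */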
static void aac_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *shost = pci_get_drvdata(dev);

	scsi_block_requests(shost);
	__aac_shutdown((struct aac_dev *)shost->hostdata);
}
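
/**
 * aac_remove_one - driver removal callback
 * @pdev:	PCI device being removed
 *
 * Tears down the SCSI host, shuts the adapter down, frees the fib maps,
 * communication area, queues and container data, and unregisters the
 * character device once the last adapter is gone.
 */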
static void aac_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_remove_host(shost);

	__aac_shutdown(aac);
	aac_fib_map_free(aac);
	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
			aac->comm_phys);
	kfree(aac->queues);

	aac_adapter_ioremap(aac, 0);

	kfree(aac->fibs);
	kfree(aac->fsa_dev);

	list_del(&aac->entry);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	if (list_empty(&aac_devices)) {
		unregister_chrdev(aac_cfg_major, "aac");
		aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
	}
}
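
/**
 * aac_flush_ios - complete all commands still owned by the firmware
 * @aac:	adapter whose outstanding commands are flushed
 *
 * Walks the fib array and completes every command the firmware still owns,
 * with DID_NO_CONNECT during PCI error handling and DID_RESET otherwise.
 */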
static void aac_flush_ios(struct aac_dev *aac)
{
	int i;
	struct scsi_cmnd *cmd;

	for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
		cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
		if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
			scsi_dma_unmap(cmd);

			if (aac->handle_pci_error)
				cmd->result = DID_NO_CONNECT << 16;
			else
				cmd->result = DID_RESET << 16;

			cmd->scsi_done(cmd);
		}
	}
}
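
/**
 * aac_pci_error_detected - PCI error recovery entry point
 * @pdev:	PCI device that hit the error
 * @error:	channel state reported by the PCI/AER core
 *
 * Flushes outstanding I/O and releases resources for frozen channels so a
 * slot reset can be attempted; permanent failures are reported back as
 * PCI_ERS_RESULT_DISCONNECT.
 */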
static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
					enum pci_channel_state error)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = shost_priv(shost);

	dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);

	switch (error) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		aac->handle_pci_error = 1;

		scsi_block_requests(aac->scsi_host_ptr);
		aac_flush_ios(aac);
		aac_release_resources(aac);

		pci_disable_pcie_error_reporting(pdev);
		aac_adapter_ioremap(aac, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		aac->handle_pci_error = 1;

		aac_flush_ios(aac);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
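
/*
 * MMIO has been re-enabled after an error; the adapter state is unknown,
 * so always ask the PCI core for a slot reset.
 */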
static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
	return PCI_ERS_RESULT_NEED_RESET;
}
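
/**
 * aac_pci_slot_reset - re-enable the device after a slot reset
 * @pdev:	PCI device that was reset
 *
 * Restores the saved PCI state and re-enables the device and bus
 * mastering; adapter re-initialisation is finished later in
 * aac_pci_resume().
 */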
static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		dev_warn(&pdev->dev,
			"aacraid: failed to enable slave\n");
		goto fail_device;
	}

	pci_set_master(pdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
		goto fail_device;
	}

	return PCI_ERS_RESULT_RECOVERED;

fail_device:
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
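
/**
 * aac_pci_resume - final step of PCI error recovery
 * @pdev:	PCI device that has been reset and re-enabled
 *
 * Remaps the adapter registers (falling back to the producer/consumer
 * interface with a minimal footprint if the full mapping fails), pauses
 * 10 seconds before reacquiring interrupt resources, brings offlined
 * devices back to SDEV_RUNNING and rescans the host.
 */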
static void aac_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct scsi_device *sdev = NULL;
	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);

	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (aac_adapter_ioremap(aac, aac->base_size)) {
		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
		/* remap failed, go back ... */
		aac->comm_interface = AAC_COMM_PRODUCER;
		if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
			dev_warn(&pdev->dev,
				"aacraid: unable to map adapter.\n");

			return;
		}
	}

	msleep(10000);

	aac_acquire_resources(aac);

	/*
	 * reset this flag to unblock ioctl() as it was set
	 * at aac_send_shutdown() to block ioctls from the upper layer
	 */
	aac->adapter_shutdown = 0;
	aac->handle_pci_error = 0;

	shost_for_each_device(sdev, shost)
		if (sdev->sdev_state == SDEV_OFFLINE)
			sdev->sdev_state = SDEV_RUNNING;

	scsi_unblock_requests(aac->scsi_host_ptr);
	scsi_scan_host(aac->scsi_host_ptr);
	pci_save_state(pdev);

	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}

static struct pci_error_handlers aac_pci_err_handler = {
	.error_detected		= aac_pci_error_detected,
	.mmio_enabled		= aac_pci_mmio_enabled,
	.slot_reset		= aac_pci_slot_reset,
	.resume			= aac_pci_resume,
};

static struct pci_driver aac_pci_driver = {
	.name		= AAC_DRIVERNAME,
	.id_table	= aac_pci_tbl,
	.probe		= aac_probe_one,
	.remove		= aac_remove_one,
#if (defined(CONFIG_PM))
	.suspend	= aac_suspend,
	.resume		= aac_resume,
#endif
	.shutdown	= aac_shutdown,
	.err_handler	= &aac_pci_err_handler,
};
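
/**
 * aac_init - module entry point
 *
 * Registers the PCI driver and sets up the "aac" character device used by
 * the management ioctl interface.
 */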
static int __init aac_init(void)
{
	int error;

	printk(KERN_INFO "Adaptec %s driver %s\n",
	       AAC_DRIVERNAME, aac_driver_version);

	error = pci_register_driver(&aac_pci_driver);
	if (error < 0)
		return error;

	aac_init_char();

	return 0;
}
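
/**
 * aac_exit - module exit point
 *
 * Unregisters the "aac" character device (if it was registered) and the
 * PCI driver.
 */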
static void __exit aac_exit(void)
{
	if (aac_cfg_major > -1)
		unregister_chrdev(aac_cfg_major, "aac");
	pci_unregister_driver(&aac_pci_driver);
}

module_init(aac_init);
module_exit(aac_exit);