/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.6 (090910)";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
		struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
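
/*
 * The _itl and _mv function suffixes correspond to the two message-unit
 * interfaces handled by this driver; the matching set of callbacks is
 * selected per PCI ID through struct hptiop_adapter_ops (see
 * hptiop_itl_ops and hptiop_mv_ops near the end of the file).
 */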
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}
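
/*
 * ITL interrupt handling: the outbound interrupt status register reports
 * firmware messages (MSG0) and completed requests (POSTQUEUE) as separate
 * bits; messages are acknowledged and forwarded to hptiop_message_callback(),
 * completed requests are drained from the outbound queue.
 */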
static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}
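
/*
 * MV message-unit helpers: 64-bit queue entries move through circular
 * inbound/outbound queues of MVIOP_QUEUE_LEN slots, with head/tail
 * indices kept in memory-mapped registers and wrapped manually.
 */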
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;

		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}
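
/*
 * Synchronous requests and messages (used for setup, reset and shutdown)
 * are completed by polling the interrupt handler with a 1 ms sleep per
 * iteration, up to the caller-supplied timeout in milliseconds.
 */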
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done? 0 : -1;
}
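
/*
 * GET_CONFIG/SET_CONFIG exchange controller parameters with the firmware.
 * On ITL the request is built in a firmware-supplied slot reached through
 * MMIO; on MV it is built in the driver's DMA-coherent internal request
 * buffer.
 */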
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}
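
/*
 * PCI BAR mapping: ITL controllers expose the IOP registers in BAR0,
 * except the 0x44xx devices, where BAR0 holds the region the driver
 * calls "plx" and the IOP registers sit in BAR2.  MV controllers use
 * BAR0 for registers and BAR2 for the message unit.
 */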
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;
	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;

	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}
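
/*
 * Free request slots are kept on a singly linked list headed at
 * hba->req_list: get_req() pops a slot for a new command and free_req()
 * pushes a completed slot back.
 */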
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}
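
/*
 * Command completion: translate the IOP result code into a SCSI midlayer
 * result, copy sense data back for CHECK CONDITION, unmap DMA, call
 * scsi_done() and return the request slot to the free list.
 */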
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list,
				min_t(size_t, SCSI_SENSE_BUFFERSIZE,
					le32_to_cpu(req->dataxfer_length)));
		break;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
		((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);
	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3)& ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}
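
/*
 * Build the hardware scatter/gather list from the command's DMA mapping;
 * the last entry is flagged with eot (end of table).  Returns the number
 * of segments, or 0 for commands that carry no data.
 */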
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}
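
/*
 * Posting a request hands its shifted physical address to the firmware;
 * the v2 ITL interface and the MV interface additionally encode the
 * request size into the low bits of the queue entry.
 */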
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
					&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
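
/*
 * queuecommand: grab a free request slot, validate the target, build the
 * S/G list and request header, then post the request to the firmware.
 * Commands for nonexistent devices are completed immediately with
 * DID_BAD_TARGET.
 */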
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			((u32 *)scp->cmnd)[0],
			((u32 *)scp->cmnd)[1],
			((u32 *)scp->cmnd)[2],
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel || scp->device->lun ||
			scp->device->id > hba->max_devices) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d) scp=%p\n",
			scp->device->host->host_no, scp->device->channel,
			scp->device->id, scp);

	return hptiop_reset_hba(hba)? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth, int reason)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static struct scsi_host_template driver_template = {
	.module                     = THIS_MODULE,
	.name                       = driver_name,
	.queuecommand               = hptiop_queuecommand,
	.eh_device_reset_handler    = hptiop_reset,
	.eh_bus_reset_handler       = hptiop_reset,
	.info                       = hptiop_info,
	.emulated                   = 0,
	.use_clustering             = ENABLE_CLUSTERING,
	.proc_name                  = driver_name,
	.shost_attrs                = hptiop_attrs,
	.this_id                    = -1,
	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
};
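
/*
 * MV only: a small (0x800 byte) DMA-coherent buffer backs the synchronous
 * GET_CONFIG/SET_CONFIG requests exchanged with the firmware.
 */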
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}
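
/*
 * Probe sequence: enable and configure the PCI device, map the BARs, wait
 * for the firmware to become ready, exchange get/set config, request the
 * IRQ, carve a 32-byte aligned pool of request frames out of one
 * DMA-coherent allocation, then enable interrupts and register the SCSI
 * host.  Error paths unwind in reverse order through the labels below.
 */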
static int __devinit hptiop_probe(struct pci_dev *pcidev,
					const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
			printk(KERN_ERR "hptiop: fail to set dma_mask\n");
			goto disable_pci_device;
		}
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;

	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 1;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->internal_memalloc) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	start_virt = dma_alloc_coherent(&pcidev->dev,
				hba->req_size*hba->max_requests + 0x20,
				&start_phy, GFP_KERNEL);

	if (!start_virt) {
		printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
		goto free_request_irq;
	}

	hba->dma_coherent = start_virt;
	hba->dma_coherent_handle = start_phy;

	if ((start_phy & 0x1f) != 0)
	{
		offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
		start_phy += offset;
		start_virt += offset;
	}

	hba->req_list = start_virt;
	for (i = 0; i < hba->max_requests; i++) {
		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
		start_virt = (char *)start_virt + hba->req_size;
		start_phy = start_phy + hba->req_size;
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent, hba->dma_coherent_handle);

free_request_irq:
	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	dma_free_coherent(&hba->pcidev->dev,
			hba->req_size * hba->max_requests + 0x20,
			hba->dma_coherent,
			hba->dma_coherent_handle);

	if (hba->ops->internal_memfree)
		hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}
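
/*
 * Per-interface operation tables; a pointer to the matching table is
 * stored in the PCI ID table's driver_data and picked up in hptiop_probe().
 */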
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.iop_wait_ready    = iop_wait_ready_itl,
	.internal_memalloc = NULL,
	.internal_memfree  = NULL,
	.map_pci_bar       = hptiop_map_pci_bar_itl,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = iop_get_config_itl,
	.set_config        = iop_set_config_itl,
	.iop_intr          = iop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.iop_wait_ready    = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.map_pci_bar       = hptiop_map_pci_bar_mv,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = iop_get_config_mv,
	.set_config        = iop_set_config_mv,
	.iop_intr          = iop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name       = driver_name,
	.id_table   = hptiop_id_table,
	.probe      = hptiop_probe,
	.remove     = hptiop_remove,
	.shutdown   = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");