
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"
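
/**
 * qla4xxx_read_flash - vendor BSG command: read adapter flash
 * @bsg_job: iscsi_bsg_job to handle
 *
 * vendor_cmd[1] carries the flash offset; the length to read is the
 * reply payload length. The flash contents are DMA'd into a coherent
 * buffer and copied out through the reply scatter-gather list.
 **/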
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
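
/**
 * qla4xxx_update_flash - vendor BSG command: write adapter flash
 * @bsg_job: iscsi_bsg_job to handle
 *
 * vendor_cmd[1] carries the flash offset and vendor_cmd[2] the write
 * options; the data to write is taken from the request payload.
 **/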
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
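
/**
 * qla4xxx_get_acb_state - vendor BSG command: get IP state of an ACB
 * @bsg_job: iscsi_bsg_job to handle
 *
 * vendor_cmd[1] selects the ACB index and vendor_cmd[2] the IP index.
 * The mailbox status registers from the query are returned in the
 * reply payload.
 **/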
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
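
/**
 * qla4xxx_read_nvram - vendor BSG command: read adapter NVRAM
 * @bsg_job: iscsi_bsg_job to handle
 *
 * vendor_cmd[1] carries the NVRAM offset; the length to read is the
 * reply payload length. offset + length is bounds-checked against the
 * NVRAM size of the specific 40xx adapter.
 **/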
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
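
/**
 * qla4xxx_update_nvram - vendor BSG command: write adapter NVRAM
 * @bsg_job: iscsi_bsg_job to handle
 *
 * vendor_cmd[1] carries the NVRAM offset; the data to write is taken
 * from the request payload and bounds-checked against the NVRAM size
 * of the specific 40xx adapter.
 **/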
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
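
/**
 * qla4xxx_restore_defaults - vendor BSG command: restore factory defaults
 * @bsg_job: iscsi_bsg_job to handle
 *
 * vendor_cmd[1..3] carry the region and two region-specific fields,
 * which are passed through to qla4xxx_restore_factory_defaults().
 **/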
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t region = 0;
	uint32_t field0 = 0;
	uint32_t field1 = 0;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
	field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: restore factory defaults "
			   "failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
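
/**
 * qla4xxx_bsg_get_acb - vendor BSG command: read an address control block
 * @bsg_job: iscsi_bsg_job to handle
 *
 * vendor_cmd[1] selects the ACB type. The reply payload must be at
 * least sizeof(struct addr_ctrl_blk); the ACB is DMA'd from the
 * adapter and copied out through the reply scatter-gather list.
 **/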
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
			   "0x%x\n", __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}
	return -ENOSYS;
}