qla_mid.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

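/*
 * qla2x00_vp_stop_timer
 *	Stop the per-vport timer.  Only virtual ports (vp_idx != 0) with an
 *	active timer are affected.
 */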
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

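/*
 * qla24xx_allocate_vp_id
 *	Find a free vp_id in ha->vp_idx_map, mark it used, assign it to the
 *	vport and link the vport onto ha->vp_list.  A return value greater
 *	than ha->max_npiv_vports means no id was available.
 */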
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

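/*
 * qla24xx_deallocate_vp_id
 *	Wait for vha->vref_count to drop to zero, unlink the vport from
 *	ha->vp_list and release its vp_id back to the bitmap.
 */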
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	spin_lock_irqsave(&ha->vport_slock, flags);
	while (atomic_read(&vha->vref_count)) {
		spin_unlock_irqrestore(&ha->vport_slock, flags);
		msleep(500);
		spin_lock_irqsave(&ha->vport_slock, flags);
	}
	list_del(&vha->list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);
	mutex_unlock(&ha->vport_lock);
}

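/*
 * qla24xx_find_vhost_by_name
 *	Return the vport on ha->vp_list whose port name matches the given
 *	WWPN, or NULL if there is no match.
 */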
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, please make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

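/*
 * qla24xx_disable_vp
 *	Log the vport out of the fabric (VCE_COMMAND_DISABLE_VPS_LOGO_ALL),
 *	mark the loop down and its devices dead, and report the resulting
 *	state to the FC transport.  Returns 0 on success, -1 on failure.
 */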
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	int ret;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

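/*
 * qla24xx_enable_vp
 *	Re-enable a vport once the physical port is up on a fabric topology
 *	by re-sending its VP configuration.  Returns 0 on success, 1 on
 *	failure.
 */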
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

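/*
 * qla24xx_configure_vp
 *	Complete vport bring-up after a vp_id has been acquired: enable RSCN
 *	reception (change request #3), configure the vhba and mark the vport
 *	active.
 */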
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

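/*
 * qla2x00_alert_all_vps
 *	Fan the listed asynchronous mailbox events out to every virtual port.
 *	A vref is taken on each vport so its handler can run with vport_slock
 *	dropped.
 */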
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

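/*
 * qla2x00_vp_abort_isp
 *	Vport-side handling of an ISP abort: treat it as a loop down, log the
 *	vport out unless an ISP reset already did, then try to re-enable the
 *	vport.
 */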
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

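/*
 * qla2x00_do_dpc_vp
 *	Per-vport DPC worker: run queued work, finish configuration of a
 *	newly acquired vp_id and handle the fcport-update, relogin,
 *	reset-marker and loop-resync requests flagged in dpc_flags.
 */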
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	ql_dbg(ql_dbg_dpc, vha, 0x4012,
	    "Entering %s.\n", __func__);
	ql_dbg(ql_dbg_dpc, vha, 0x4013,
	    "vp_flags: 0x%lx.\n", vha->vp_flags);

	qla2x00_do_work(vha);

	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
		/* VP acquired. complete port configuration */
		ql_dbg(ql_dbg_dpc, vha, 0x4014,
		    "Configure VP scheduled.\n");
		qla24xx_configure_vp(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4015,
		    "Configure VP end.\n");
		return 0;
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {
		ql_dbg(ql_dbg_dpc, vha, 0x4018,
		    "Relogin needed scheduled.\n");
		qla2x00_relogin(vha);
		ql_dbg(ql_dbg_dpc, vha, 0x4019,
		    "Relogin needed end.\n");
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

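/*
 * qla2x00_do_dpc_all_vps
 *	Called on the physical port only; walk ha->vp_list and run
 *	qla2x00_do_dpc_vp() for each virtual port while holding a vref on it.
 */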
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			ret = qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

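/*
 * qla24xx_vport_create_req_sanity_check
 *	Validate a vport-create request from the FC transport: initiator role
 *	only, NPIV supported by the firmware and the attached switch, unique
 *	WWPN, and the max-vport limit not exceeded.  Returns 0 or a VPCERR_*
 *	code.
 */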
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against the max NPIV vport limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %d is bigger "
		    "than max_npiv_vports %d.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

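/*
 * qla24xx_create_vhost
 *	Allocate and initialise a new scsi_qla_host for the vport: assign a
 *	vp_id, copy the WWNN/WWPN from the fc_vport, inherit SCSI host limits
 *	from the base port and start the vport timer.  Returns the new vha,
 *	or NULL on failure.
 */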
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->this_id = 255;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
		sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp);
		rsp->msix->have_irq = 0;
		rsp->msix->rsp = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
		sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = -1;

	if (req) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_req_que(vha, req);

	return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = -1;

	if (rsp) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
	}
	if (ret == QLA_SUCCESS)
		qla25xx_free_rsp_que(vha, rsp);

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;

	/* Delete request queues */
	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
		req = ha->req_q_map[cnt];
		if (req) {
			ret = qla25xx_delete_req_que(vha, req);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00ea,
				    "Couldn't delete req que %d.\n",
				    req->id);
				return ret;
			}
		}
	}

	/* Delete response queues */
	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
		rsp = ha->rsp_q_map[cnt];
		if (rsp) {
			ret = qla25xx_delete_rsp_que(vha, rsp);
			if (ret != QLA_SUCCESS) {
				ql_log(ql_log_warn, vha, 0x00eb,
				    "Couldn't delete rsp que %d.\n",
				    rsp->id);
				return ret;
			}
		}
	}
	return ret;
}

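/*
 * qla25xx_create_req_que
 *	Allocate a new request queue ring, claim a free queue id, initialise
 *	the queue on the firmware via qla25xx_init_req_que() and return the
 *	queue id (0 on failure).
 */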
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
			(req->length + 1) * sizeof(request_t),
			&req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);

	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	mutex_unlock(&ha->vport_lock);

	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	ret = qla25xx_init_req_que(base_vha, req);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00df,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

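/*
 * qla_do_work
 *	Work item that processes a response queue under the hardware lock on
 *	behalf of the base vha.
 */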
static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;

	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
	uint8_t vp_idx, uint16_t rid, int req)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t __iomem *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
			(rsp->length + 1) * sizeof(response_t),
			&rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->vport_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->vport_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	if (ha->flags.msix_enabled)
		rsp->msix = &ha->msix_entries[que_id + 1];
	else
		ql_log(ql_log_warn, base_vha, 0x00e3,
		    "MSIX not enabled.\n");

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that are not MSIX-NACK capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	mutex_unlock(&ha->vport_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(rsp);
	if (ret)
		goto que_failed;

	ret = qla25xx_init_rsp_que(base_vha, rsp);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_fatal, base_vha, 0x00e7,
		    "%s failed.\n", __func__);
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
		goto que_failed;
	}
	if (req >= 0)
		rsp->req = ha->req_q_map[req];
	else
		rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (rsp->hw->wq)
		INIT_WORK(&rsp->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}