qla_mid.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);

	qlt_update_vp_map(vha, SET_VP_IDX);

	spin_unlock_irqrestore(&ha->vport_slock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}
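
/*
 * Editorial sketch (not part of the driver): qla24xx_allocate_vp_id() above
 * is an instance of the common "bitmap + mutex" ID-allocator pattern. A
 * generic version of the same idea, assuming a caller-supplied bitmap `map`,
 * mutex `lock`, and inclusive upper bound `max`, would look like:
 *
 *	mutex_lock(&lock);
 *	id = find_first_zero_bit(map, max + 1);
 *	if (id > max) {
 *		mutex_unlock(&lock);
 *		return -ENOSPC;		(no free slot)
 *	}
 *	set_bit(id, map);
 *	mutex_unlock(&lock);
 *	return id;
 *
 * The driver's variant instead returns the out-of-range id on exhaustion and
 * lets the caller compare it against ha->max_npiv_vports.
 */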

void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
	    10*HZ);

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}
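
/*
 * Editorial sketch (not part of the driver): the vref_count handshake used
 * above pairs with the traversal loops in qla2x00_alert_all_vps() and
 * qla2x00_do_dpc_all_vps(). The traversal side takes a reference so that
 * vport_slock can be dropped while calling into the vport:
 *
 *	spin_lock_irqsave(&ha->vport_slock, flags);
 *	list_for_each_entry(vp, &ha->vp_list, list) {
 *		atomic_inc(&vp->vref_count);
 *		spin_unlock_irqrestore(&ha->vport_slock, flags);
 *
 *		process_vport(vp);	(hypothetical per-vport work)
 *
 *		spin_lock_irqsave(&ha->vport_slock, flags);
 *		atomic_dec(&vp->vref_count);
 *		wake_up(&vp->vref_waitq);
 *	}
 *	spin_unlock_irqrestore(&ha->vport_slock, flags);
 *
 * qla24xx_deallocate_vp_id() waits (with a 10 s timeout) for vref_count to
 * reach zero before unlinking the vport from ha->vp_list.
 */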

static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called in contexts other than vp create,
	 * disable or delete, make sure it is synchronized with the
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret;
	fc_port_t *fcport;

	ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha, 0);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->vport_slock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);
		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	/*
	 * To exclusively reset vport, we need to log it out first. Note: this
	 * control_vp can fail if ISP reset is already issued, this is
	 * expected, as the vp would be already logged out due to ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);
	return qla24xx_enable_vp(vha);
}

static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	qla2x00_do_work(vha);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla2x00_relogin(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4019,
			    "Relogin needed end.\n");
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check that the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check against max-npiv-supports */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

	vha->dpc_flags = 0L;

	/*
	 * To fix the issue of processing a parent's RSCN for the vport before
	 * its SCR is complete.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

	vha->req = base_vha->req;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);

	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);

	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (void *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);

	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}
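
/*
 * Editorial sketch (not part of the driver): a minimal call into the routine
 * above, assuming the caller has already created the paired response queue
 * `rsp_id` and wants default options and QoS, could look like:
 *
 *	int req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, 0, true);
 *	if (!req_id)
 *		... handle failure: 0 means no request queue was created ...
 *
 * The returned value is the queue id (req->id); callers use it to look up
 * ha->req_q_map[] later, as qla25xx_delete_queues() does when tearing the
 * queue down.
 */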

static void qla_do_work(struct work_struct *work)
{
	unsigned long flags;
	struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = qpair->hw;
	struct srb_iocb *nvme, *nxt_nvme;

	spin_lock_irqsave(&qpair->qp_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, qpair->rsp);
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	list_for_each_entry_safe(nvme, nxt_nvme, &qpair->nvme_done_list,
	    u.nvme.entry) {
		list_del_init(&nvme->u.nvme.entry);
		qla_nvme_cmpl_io(nvme);
	}
}

/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
{
	int ret = 0;
	struct rsp_que *rsp = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;

	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
	if (rsp == NULL) {
		ql_log(ql_log_warn, base_vha, 0x0066,
		    "Failed to allocate memory for response queue.\n");
		goto failed;
	}

	rsp->length = RESPONSE_ENTRY_CNT_MQ;
	rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (rsp->length + 1) * sizeof(response_t),
	    &rsp->dma, GFP_KERNEL);
	if (rsp->ring == NULL) {
		ql_log(ql_log_warn, base_vha, 0x00e1,
		    "Failed to allocate memory for response ring.\n");
		goto que_failed;
	}

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
	if (que_id >= ha->max_rsp_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00e2,
		    "No resources to create additional response queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->rsp_qid_map);

	rsp->msix = qpair->msix;

	ha->rsp_q_map[que_id] = rsp;
	rsp->rid = rid;
	rsp->vp_idx = vp_idx;
	rsp->hw = ha;
	ql_dbg(ql_dbg_init, base_vha, 0x00e4,
	    "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
	    que_id, rsp->rid, rsp->vp_idx, rsp->hw);
	/* Use alternate PCI bus number */
	if (MSB(rsp->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(rsp->rid))
		options |= BIT_5;
	/* Enable MSIX handshake mode for adapters that are not MSIX-NACK capable */
	if (!IS_MSIX_NACK_CAPABLE(ha))
		options |= BIT_6;

	/* Set option to indicate response queue creation */
	options |= BIT_1;

	rsp->options = options;
	rsp->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);
	ql_dbg(ql_dbg_init, base_vha, 0x00e5,
	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
	    rsp->options, rsp->id, rsp->rsp_q_in,
	    rsp->rsp_q_out);

	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
	if (ret)
		goto que_failed;

	if (startqp) {
		ret = qla25xx_init_rsp_que(base_vha, rsp);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00e7,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->rsp_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_rsp_created = 1;
	}
	rsp->req = NULL;

	qla2x00_init_response_q_entries(rsp);
	if (qpair->hw->wq)
		INIT_WORK(&qpair->q_work, qla_do_work);
	return rsp->id;

que_failed:
	qla25xx_free_rsp_que(base_vha, rsp);
failed:
	return 0;
}