qla_mid.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>
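
/*
 * qla2x00_vp_stop_timer() - stop the timer of a virtual port.
 * @vha: virtual host being torn down
 *
 * Only NPIV vports (vp_idx != 0) own a private timer; the physical
 * port's timer is left untouched.
 */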
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
        if (vha->vp_idx && vha->timer_active) {
                del_timer_sync(&vha->timer);
                vha->timer_active = 0;
        }
}
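
/*
 * qla24xx_allocate_vp_id() - reserve a vp_idx slot for a new vport.
 *
 * Scans ha->vp_idx_map under vport_lock for a free index, marks it used,
 * and links the vport onto ha->vp_list under vport_slock.  Returns the
 * allocated id, or a value greater than ha->max_npiv_vports on failure.
 */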
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find an empty slot and assign a vp_id */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
        if (vp_id > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa000,
                    "vp_id %d is bigger than max-supported %d.\n",
                    vp_id, ha->max_npiv_vports);
                mutex_unlock(&ha->vport_lock);
                return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
        qlt_update_vp_map(vha, SET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
}
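
/*
 * qla24xx_deallocate_vp_id() - release a vport's vp_idx slot.
 *
 * Waits (up to 10 seconds) for outstanding references tracked in
 * vref_count to be dropped, unlinks the vport from ha->vp_list, and
 * clears its bit in ha->vp_idx_map.  Takes vport_lock and vport_slock
 * internally.
 */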
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;

        mutex_lock(&ha->vport_lock);
        /*
         * Wait for all pending activities to finish before removing vport
         * from the list.
         * Lock needs to be held for safe removal from the list (it
         * ensures no active vp_list traversal while the vport is removed
         * from the queue).
         */
        wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
            10*HZ);

        spin_lock_irqsave(&ha->vport_slock, flags);
        if (atomic_read(&vha->vref_count)) {
                ql_dbg(ql_dbg_vport, vha, 0xfffa,
                    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
                vha->vref_count = (atomic_t)ATOMIC_INIT(0);
        }
        list_del(&vha->list);
        qlt_update_vp_map(vha, RESET_VP_IDX);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        vp_id = vha->vp_idx;
        ha->num_vhosts--;
        clear_bit(vp_id, ha->vp_idx_map);

        mutex_unlock(&ha->vport_lock);
}
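
/*
 * qla24xx_find_vhost_by_name() - look up a vport by its WWPN.
 *
 * Walks ha->vp_list under vport_slock and returns the first vport whose
 * port_name matches @port_name, or NULL if none does.
 */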
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
        scsi_qla_host_t *vha;
        struct scsi_qla_host *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Locate matching device in database. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
                if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
                        return vha;
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
}
/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when the vport goes offline.
 *
 * Input:
 *	vha = virtual host adapter block pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 *	Kernel context.
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
        /*
         * !!! NOTE !!!
         * If this function is called in contexts other than vp create,
         * disable or delete, please make sure it is synchronized with the
         * delete thread.
         */
        fc_port_t *fcport;

        list_for_each_entry(fcport, &vha->vp_fcports, list) {
                ql_dbg(ql_dbg_vport, vha, 0xa001,
                    "Marking port dead, loop_id=0x%04x : %x.\n",
                    fcport->loop_id, fcport->vha->vp_idx);

                qla2x00_mark_device_lost(vha, fcport, 0, 0);
                qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        }
}
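
/*
 * qla24xx_disable_vp() - log out and disable a virtual port.
 *
 * Issues VCE_COMMAND_DISABLE_VPS_LOGO_ALL, forces the vport loop state
 * down, removes its AL_PA from the target-mode vp map and marks all of
 * its fcports lost.  Returns 0 on success, -1 if the firmware command
 * failed.
 */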
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
        unsigned long flags;
        int ret;

        ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        /* Remove port id from vp target map */
        spin_lock_irqsave(&vha->hw->vport_slock, flags);
        qlt_update_vp_map(vha, RESET_AL_PA);
        spin_unlock_irqrestore(&vha->hw->vport_slock, flags);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                return -1;
        }
        return 0;
}
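
/*
 * qla24xx_enable_vp() - bring a virtual port online.
 *
 * Refuses to enable the vport while the physical port is down or the
 * topology is not a fabric (ISP_CFG_F); otherwise pushes the vport
 * configuration to the firmware.  Returns 0 on success, 1 on failure.
 */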
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
        int ret;
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        /* Check if physical ha port is Up */
        if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
            atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
            !(ha->current_topology & ISP_CFG_F)) {
                vha->vp_err_state = VP_ERR_PORTDWN;
                fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
                goto enable_failed;
        }

        /* Initialize the new vport unless it is a persistent port */
        mutex_lock(&ha->vport_lock);
        ret = qla24xx_modify_vp_config(vha);
        mutex_unlock(&ha->vport_lock);

        if (ret != QLA_SUCCESS) {
                fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                goto enable_failed;
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801a,
            "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
        return 0;

enable_failed:
        ql_dbg(ql_dbg_taskm, vha, 0x801b,
            "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
        return 1;
}
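
/*
 * qla24xx_configure_vp() - finish vport bring-up once a vp_idx is owned.
 *
 * Sends change request #3 so the vport receives RSCNs, then logs the
 * vport into the fabric via qla24xx_configure_vhba() and marks it
 * VP_ACTIVE / FC_VPORT_ACTIVE.
 */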
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
        struct fc_vport *fc_vport;
        int ret;

        fc_vport = vha->fc_vport;
        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
                ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
                    "receiving of RSCN requests: 0x%x.\n", ret);
                return;
        } else {
                /* Corresponds to SCR enabled */
                clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
        }

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
                return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}
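
/*
 * qla2x00_alert_all_vps() - fan an async event out to every vport.
 *
 * Iterates ha->vp_list under vport_slock; for each vport the lock is
 * dropped while qla2x00_async_event() runs, with vref_count held so the
 * vport cannot be deleted underneath the traversal.
 */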
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha = rsp->hw;
        int i = 0;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        switch (mb[0]) {
                        case MBA_LIP_OCCURRED:
                        case MBA_LOOP_UP:
                        case MBA_LOOP_DOWN:
                        case MBA_LIP_RESET:
                        case MBA_POINT_TO_POINT:
                        case MBA_CHG_IN_CONNECTION:
                        case MBA_PORT_UPDATE:
                        case MBA_RSCN_UPDATE:
                                ql_dbg(ql_dbg_async, vha, 0x5024,
                                    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                                    i, *mb, vha);
                                qla2x00_async_event(vha, rsp, mb);
                                break;
                        }

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vha->vref_count);
                        wake_up(&vha->vref_waitq);
                }
                i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}
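
/*
 * qla2x00_vp_abort_isp() - handle an ISP abort on behalf of a vport.
 *
 * The physical port performs the actual chip recovery; the vport is
 * treated as a loop-down, logged out if no ISP reset is already in
 * flight, and then re-enabled.
 */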
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                qla2x00_mark_all_devices_lost(vha, 0);
        } else {
                if (!atomic_read(&vha->loop_down_timer))
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        /*
         * To exclusively reset vport, we need to log it out first. Note: this
         * control_vp can fail if ISP reset is already issued, this is
         * expected, as the vp would be already logged out due to ISP reset.
         */
        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
                qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);
        return qla24xx_enable_vp(vha);
}
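
/*
 * qla2x00_do_dpc_vp() - per-vport deferred procedure call.
 *
 * Runs the queued work for one vport: initial configuration once the
 * base port reports VP_CONFIG_OK, fcport updates, relogins and loop
 * resyncs, each gated by the corresponding dpc_flags bit.
 */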
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        qla2x00_do_work(vha);

        /* Check if Fw is ready to configure VP first */
        if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
                if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
                        /* VP acquired. complete port configuration */
                        ql_dbg(ql_dbg_dpc, vha, 0x4014,
                            "Configure VP scheduled.\n");
                        qla24xx_configure_vp(vha);
                        ql_dbg(ql_dbg_dpc, vha, 0x4015,
                            "Configure VP end.\n");
                        return 0;
                }
        }

        if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
                ql_dbg(ql_dbg_dpc, vha, 0x4016,
                    "FCPort update scheduled.\n");
                qla2x00_update_fcports(vha);
                clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
                ql_dbg(ql_dbg_dpc, vha, 0x4017,
                    "FCPort update end.\n");
        }

        if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {

                ql_dbg(ql_dbg_dpc, vha, 0x4018,
                    "Relogin needed scheduled.\n");
                qla2x00_relogin(vha);
                ql_dbg(ql_dbg_dpc, vha, 0x4019,
                    "Relogin needed end.\n");
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
                clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
                        ql_dbg(ql_dbg_dpc, vha, 0x401a,
                            "Loop resync scheduled.\n");
                        qla2x00_loop_resync(vha);
                        clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
                        ql_dbg(ql_dbg_dpc, vha, 0x401b,
                            "Loop resync end.\n");
                }
        }

        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
            "Exiting %s.\n", __func__);
        return 0;
}
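
/*
 * qla2x00_do_dpc_all_vps() - run the DPC work of every vport.
 *
 * Invoked from the base port's DPC thread only (returns early when
 * called on a vport).  Uses the same vref_count protocol as
 * qla2x00_alert_all_vps() to drop vport_slock around each per-vport
 * call.
 */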
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *vp;
        unsigned long flags = 0;

        if (vha->vp_idx)
                return;
        if (list_empty(&ha->vp_list))
                return;

        clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

        if (!(ha->current_topology & ISP_CFG_F))
                return;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vp, &ha->vp_list, list) {
                if (vp->vp_idx) {
                        atomic_inc(&vp->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);

                        qla2x00_do_dpc_vp(vp);

                        spin_lock_irqsave(&ha->vport_slock, flags);
                        atomic_dec(&vp->vref_count);
                }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
}
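
/*
 * qla24xx_vport_create_req_sanity_check() - validate an NPIV create request.
 *
 * Rejects the request unless the role is FCP initiator, firmware and the
 * attached switch support NPIV, the requested WWPN is unique, and another
 * vport slot is still available.  Returns 0 or a VPCERR_* code.
 */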
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        uint8_t port_name[WWN_SIZE];

        if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
                return VPCERR_UNSUPPORTED;

        /* Check that the F/W and H/W support NPIV */
        if (!ha->flags.npiv_supported)
                return VPCERR_UNSUPPORTED;

        /* Check whether an NPIV-capable switch is present */
        if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
                return VPCERR_NO_FABRIC_SUPP;

        /* Check for a unique WWPN */
        u64_to_wwn(fc_vport->port_name, port_name);
        if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
                return VPCERR_BAD_WWN;
        vha = qla24xx_find_vhost_by_name(ha, port_name);
        if (vha)
                return VPCERR_BAD_WWN;

        /* Check the maximum number of supported NPIV vports */
        if (ha->num_vhosts > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa004,
                    "num_vhosts %u is bigger "
                    "than max_npiv_vports %u.\n",
                    ha->num_vhosts, ha->max_npiv_vports);
                return VPCERR_UNSUPPORTED;
        }
        return 0;
}
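
/*
 * qla24xx_create_vhost() - allocate and initialize a new vport host.
 *
 * Creates a Scsi_Host for the vport, copies the WWNN/WWPN from the
 * fc_vport, reserves a vp_idx, inherits the base port's request queue and
 * starts the vport timer.  Returns the new scsi_qla_host or NULL.
 */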
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        struct qla_hw_data *ha = base_vha->hw;
        scsi_qla_host_t *vha;
        struct scsi_host_template *sht = &qla2xxx_driver_template;
        struct Scsi_Host *host;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
                ql_log(ql_log_warn, vha, 0xa005,
                    "scsi_host_alloc() failed for vport.\n");
                return NULL;
        }

        host = vha->host;
        fc_vport->dd_data = vha;
        /* New host info */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
                ql_dbg(ql_dbg_vport, vha, 0xa006,
                    "Couldn't allocate vp_id.\n");
                goto create_vhost_failed;
        }
        vha->mgmt_svr_loop_id = 10 + vha->vp_idx;

        vha->dpc_flags = 0L;

        /*
         * To fix the issue of processing a parent's RSCN for the vport before
         * its SCR is complete.
         */
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);

        vha->req = base_vha->req;
        host->can_queue = base_vha->req->length + 128;
        host->cmd_per_lun = 3;
        if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
                host->max_cmd_len = 32;
        else
                host->max_cmd_len = MAX_CMDSZ;
        host->max_channel = MAX_BUSES - 1;
        host->max_lun = ql2xmaxlun;
        host->unique_id = host->host_no;
        host->max_id = ha->max_fibre_devices;
        host->transportt = qla2xxx_transport_vport_template;

        ql_dbg(ql_dbg_vport, vha, 0xa007,
            "Detect vport hba %ld at address = %p.\n",
            vha->host_no, vha);

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);
        set_bit(vha->vp_idx, ha->vp_idx_map);
        ha->cur_vport_count++;
        mutex_unlock(&ha->vport_lock);

        return vha;

create_vhost_failed:
        return NULL;
}
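
/*
 * qla25xx_free_req_que() - release the memory behind an extra request queue.
 *
 * Frees the DMA ring, drops the queue's slot in ha->req_q_map /
 * req_qid_map (queue 0, the base queue, is never unmapped here) and frees
 * the bookkeeping structures.  qla25xx_free_rsp_que() below does the same
 * for a response queue and also releases its MSI-X vector.
 */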
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = req->id;

        dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
            sizeof(request_t), req->ring, req->dma);
        req->ring = NULL;
        req->dma = 0;
        if (que_id) {
                ha->req_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(req->outstanding_cmds);
        kfree(req);
        req = NULL;
}
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        struct qla_hw_data *ha = vha->hw;
        uint16_t que_id = rsp->id;

        if (rsp->msix && rsp->msix->have_irq) {
                free_irq(rsp->msix->vector, rsp);
                rsp->msix->have_irq = 0;
                rsp->msix->rsp = NULL;
        }
        dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
            sizeof(response_t), rsp->ring, rsp->dma);
        rsp->ring = NULL;
        rsp->dma = 0;
        if (que_id) {
                ha->rsp_q_map[que_id] = NULL;
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
        }
        kfree(rsp);
        rsp = NULL;
}
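
/*
 * qla25xx_delete_req_que() / qla25xx_delete_rsp_que() - tear down one queue.
 *
 * Setting BIT_0 in the queue options before re-initializing it asks the
 * firmware to delete the queue; the host-side resources are freed only if
 * that mailbox command succeeds.
 */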
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
        int ret = -1;

        if (req) {
                req->options |= BIT_0;
                ret = qla25xx_init_req_que(vha, req);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_req_que(vha, req);

        return ret;
}

static int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
        int ret = -1;

        if (rsp) {
                rsp->options |= BIT_0;
                ret = qla25xx_init_rsp_que(vha, rsp);
        }
        if (ret == QLA_SUCCESS)
                qla25xx_free_rsp_que(vha, rsp);

        return ret;
}
/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;

        /* Delete request queues */
        for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
                if (req && test_bit(cnt, ha->req_qid_map)) {
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00ea,
                                    "Couldn't delete req que %d.\n",
                                    req->id);
                                return ret;
                        }
                }
        }

        /* Delete response queues */
        for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
                rsp = ha->rsp_q_map[cnt];
                if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
                        ret = qla25xx_delete_rsp_que(vha, rsp);
                        if (ret != QLA_SUCCESS) {
                                ql_log(ql_log_warn, vha, 0x00eb,
                                    "Couldn't delete rsp que %d.\n",
                                    rsp->id);
                                return ret;
                        }
                }
        }
        return ret;
}
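
/*
 * qla25xx_create_req_que() - allocate and initialize an extra request queue.
 *
 * Allocates the ring and outstanding-command array, claims a free qid
 * under vport_lock, wires the queue registers of the 25xx multi-queue
 * register block and asks the firmware to initialize the queue.  Returns
 * the new queue id, or 0 on failure (queue 0 is the always-present base
 * queue).
 *
 * Illustrative sketch of a caller (hypothetical values, not from this
 * file): rsp_que = -1 leaves the queue without an attached response
 * queue, and qos = 5 is an arbitrary example priority.
 *
 *	que = qla25xx_create_req_que(ha, 0, vha->vp_idx, 0, -1, 5);
 *	if (!que)
 *		return QLA_FUNCTION_FAILED;
 */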
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
{
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;
        uint32_t cnt;

        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00d9,
                    "Failed to allocate memory for request queue.\n");
                goto failed;
        }

        req->length = REQUEST_ENTRY_CNT_24XX;
        req->ring = dma_alloc_coherent(&ha->pdev->dev,
            (req->length + 1) * sizeof(request_t),
            &req->dma, GFP_KERNEL);
        if (req->ring == NULL) {
                ql_log(ql_log_fatal, base_vha, 0x00da,
                    "Failed to allocate memory for request_ring.\n");
                goto que_failed;
        }

        ret = qla2x00_alloc_outstanding_cmds(ha, req);
        if (ret != QLA_SUCCESS)
                goto que_failed;

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
        if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00db,
                    "No resources to create additional request queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->req_qid_map);
        ha->req_q_map[que_id] = req;
        req->rid = rid;
        req->vp_idx = vp_idx;
        req->qos = qos;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);
        ql_dbg(ql_dbg_init, base_vha, 0x00dc,
            "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
            que_id, req->rid, req->vp_idx, req->qos);

        if (rsp_que < 0)
                req->rsp = NULL;
        else
                req->rsp = ha->rsp_q_map[rsp_que];
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;

        ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
            "options=0x%x.\n", req->options);
        ql_dbg(ql_dbg_init, base_vha, 0x00dd,
            "options=0x%x.\n", req->options);

        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                req->outstanding_cmds[cnt] = NULL;
        req->current_outstanding_cmd = 1;

        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        req->req_q_in = &reg->isp25mq.req_q_in;
        req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        req->out_ptr = (void *)(req->ring + req->length);
        mutex_unlock(&ha->vport_lock);

        ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index,
            req->cnt, req->id, req->max_q_depth);
        ql_dbg(ql_dbg_init, base_vha, 0x00de,
            "ring_ptr=%p ring_index=%d, "
            "cnt=%d id=%d max_q_depth=%d.\n",
            req->ring_ptr, req->ring_index, req->cnt,
            req->id, req->max_q_depth);

        ret = qla25xx_init_req_que(base_vha, req);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00df,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->req_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }

        return req->id;

que_failed:
        qla25xx_free_req_que(base_vha, req);
failed:
        return 0;
}
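
/*
 * qla_do_work() - deferred response-queue processing.
 *
 * Work item attached to a multiqueue response queue; it drains the queue
 * under hardware_lock in process context on behalf of the base host.
 */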
static void qla_do_work(struct work_struct *work)
{
        unsigned long flags;
        struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = rsp->hw;

        spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        qla24xx_process_response_queue(vha, rsp);
        spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
int
qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int req)
{
        int ret = 0;
        struct rsp_que *rsp = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t *reg;

        rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
        if (rsp == NULL) {
                ql_log(ql_log_warn, base_vha, 0x0066,
                    "Failed to allocate memory for response queue.\n");
                goto failed;
        }

        rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
            (rsp->length + 1) * sizeof(response_t),
            &rsp->dma, GFP_KERNEL);
        if (rsp->ring == NULL) {
                ql_log(ql_log_warn, base_vha, 0x00e1,
                    "Failed to allocate memory for response ring.\n");
                goto que_failed;
        }

        mutex_lock(&ha->vport_lock);
        que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
        if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->vport_lock);
                ql_log(ql_log_warn, base_vha, 0x00e2,
                    "No resources to create additional response queue.\n");
                goto que_failed;
        }
        set_bit(que_id, ha->rsp_qid_map);

        if (ha->flags.msix_enabled)
                rsp->msix = &ha->msix_entries[que_id + 1];
        else
                ql_log(ql_log_warn, base_vha, 0x00e3,
                    "MSIX not enabled.\n");

        ha->rsp_q_map[que_id] = rsp;
        rsp->rid = rid;
        rsp->vp_idx = vp_idx;
        rsp->hw = ha;
        ql_dbg(ql_dbg_init, base_vha, 0x00e4,
            "queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
            que_id, rsp->rid, rsp->vp_idx, rsp->hw);
        /* Use alternate PCI bus number */
        if (MSB(rsp->rid))
                options |= BIT_4;
        /* Use alternate PCI devfn */
        if (LSB(rsp->rid))
                options |= BIT_5;
        /* Enable MSIX handshake mode for adapters that are not NACK capable */
        if (!IS_MSIX_NACK_CAPABLE(ha))
                options |= BIT_6;

        rsp->options = options;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
        rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
        rsp->in_ptr = (void *)(rsp->ring + rsp->length);
        mutex_unlock(&ha->vport_lock);
        ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);
        ql_dbg(ql_dbg_init, base_vha, 0x00e5,
            "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
            rsp->options, rsp->id, rsp->rsp_q_in,
            rsp->rsp_q_out);

        ret = qla25xx_request_irq(rsp);
        if (ret)
                goto que_failed;

        ret = qla25xx_init_rsp_que(base_vha, rsp);
        if (ret != QLA_SUCCESS) {
                ql_log(ql_log_fatal, base_vha, 0x00e7,
                    "%s failed.\n", __func__);
                mutex_lock(&ha->vport_lock);
                clear_bit(que_id, ha->rsp_qid_map);
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }
        if (req >= 0)
                rsp->req = ha->req_q_map[req];
        else
                rsp->req = NULL;

        qla2x00_init_response_q_entries(rsp);
        if (rsp->hw->wq)
                INIT_WORK(&rsp->q_work, qla_do_work);
        return rsp->id;

que_failed:
        qla25xx_free_rsp_que(base_vha, rsp);
failed:
        return 0;
}