snic_main.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041
  1. /*
  2. * Copyright 2014 Cisco Systems, Inc. All rights reserved.
  3. *
  4. * This program is free software; you may redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; version 2 of the License.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  9. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  10. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  11. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  12. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  13. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  14. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  15. * SOFTWARE.
  16. */
  17. #include <linux/module.h>
  18. #include <linux/mempool.h>
  19. #include <linux/string.h>
  20. #include <linux/slab.h>
  21. #include <linux/errno.h>
  22. #include <linux/init.h>
  23. #include <linux/pci.h>
  24. #include <linux/skbuff.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/spinlock.h>
  27. #include <linux/workqueue.h>
  28. #include <scsi/scsi_host.h>
  29. #include <scsi/scsi_tcq.h>
  30. #include "snic.h"
  31. #include "snic_fwint.h"
  32. #define PCI_DEVICE_ID_CISCO_SNIC 0x0046
  33. /* Supported devices by snic module */
  34. static struct pci_device_id snic_id_table[] = {
  35. {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
  36. { 0, } /* end of table */
  37. };
  38. unsigned int snic_log_level = 0x0;
  39. module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
  40. MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");
  41. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  42. unsigned int snic_trace_max_pages = 16;
  43. module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
  44. MODULE_PARM_DESC(snic_trace_max_pages,
  45. "Total allocated memory pages for snic trace buffer");
  46. #endif
  47. unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
  48. module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
  49. MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");
  50. /*
  51. * snic_slave_alloc : callback function to SCSI Mid Layer, called on
  52. * scsi device initialization.
  53. */
  54. static int
  55. snic_slave_alloc(struct scsi_device *sdev)
  56. {
  57. struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));
  58. if (!tgt || snic_tgt_chkready(tgt))
  59. return -ENXIO;
  60. return 0;
  61. }
  62. /*
  63. * snic_slave_configure : callback function to SCSI Mid Layer, called on
  64. * scsi device initialization.
  65. */
  66. static int
  67. snic_slave_configure(struct scsi_device *sdev)
  68. {
  69. struct snic *snic = shost_priv(sdev->host);
  70. u32 qdepth = 0, max_ios = 0;
  71. int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;
  72. /* Set Queue Depth */
  73. max_ios = snic_max_qdepth;
  74. qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
  75. scsi_change_queue_depth(sdev, qdepth);
  76. if (snic->fwinfo.io_tmo > 1)
  77. tmo = snic->fwinfo.io_tmo * HZ;
  78. /* FW requires extended timeouts */
  79. blk_queue_rq_timeout(sdev->request_queue, tmo);
  80. return 0;
  81. }
  82. static int
  83. snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
  84. {
  85. struct snic *snic = shost_priv(sdev->host);
  86. int qsz = 0;
  87. qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
  88. if (qsz < sdev->queue_depth)
  89. atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
  90. else if (qsz > sdev->queue_depth)
  91. atomic64_inc(&snic->s_stats.misc.qsz_rampup);
  92. atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);
  93. scsi_change_queue_depth(sdev, qsz);
  94. return sdev->queue_depth;
  95. }
/*
 * SCSI host template: capabilities and midlayer entry points for snic.
 */
static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,	/* may be lowered in snic_probe() */
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,	/* 2048 sectors */
	.shost_attrs = snic_attrs,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};
  117. /*
  118. * snic_handle_link_event : Handles link events such as link up/down/error
  119. */
  120. void
  121. snic_handle_link_event(struct snic *snic)
  122. {
  123. unsigned long flags;
  124. spin_lock_irqsave(&snic->snic_lock, flags);
  125. if (snic->stop_link_events) {
  126. spin_unlock_irqrestore(&snic->snic_lock, flags);
  127. return;
  128. }
  129. spin_unlock_irqrestore(&snic->snic_lock, flags);
  130. queue_work(snic_glob->event_q, &snic->link_work);
  131. } /* end of snic_handle_link_event */
  132. /*
  133. * snic_notify_set : sets notification area
  134. * This notification area is to receive events from fw
  135. * Note: snic supports only MSIX interrupts, in which we can just call
  136. * svnic_dev_notify_set directly
  137. */
  138. static int
  139. snic_notify_set(struct snic *snic)
  140. {
  141. int ret = 0;
  142. enum vnic_dev_intr_mode intr_mode;
  143. intr_mode = svnic_dev_get_intr_mode(snic->vdev);
  144. if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
  145. ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
  146. } else {
  147. SNIC_HOST_ERR(snic->shost,
  148. "Interrupt mode should be setup before devcmd notify set %d\n",
  149. intr_mode);
  150. ret = -1;
  151. }
  152. return ret;
  153. } /* end of snic_notify_set */
/*
 * snic_dev_wait : polls vnic open status.
 *
 * Kicks off @start(vdev, arg) and then polls @finished every 100ms until
 * it reports completion, an error occurs, or both the 2-second deadline
 * and the minimum retry count are exhausted.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
	      int (*start)(struct vnic_dev *, int),
	      int (*finished)(struct vnic_dev *, int *),
	      int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible take long time
	 * to wakeup, which results skipping retry. The retry counter
	 * ensures to retry at least two times.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;

		/* Sleep ~100ms between polls. */
		schedule_timeout_uninterruptible(HZ/10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */
/*
 * snic_cleanup: called by snic_remove
 * Stops the snic device, masks all interrupts, Completed CQ entries are
 * drained. Posted WQ/RQ/Copy-WQ entries are cleanup
 *
 * Order matters: the device is disabled and interrupts are masked before
 * any queue is drained so no new completions arrive mid-teardown.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	/* Quiesce the device and silence all interrupt sources first. */
	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	/* Release the per-request mempools created in snic_probe(). */
	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */
  224. static void
  225. snic_iounmap(struct snic *snic)
  226. {
  227. if (snic->bar0.vaddr)
  228. iounmap(snic->bar0.vaddr);
  229. }
/*
 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
 *
 * Retries the status query a handful of times on error before giving up;
 * returns the last svnic_dev_open_done() result, with *done set on success.
 */
static int
snic_vdev_open_done(struct vnic_dev *vdev, int *done)
{
	struct snic *snic = svnic_dev_priv(vdev);
	int ret;
	int nretries = 5;

	do {
		ret = svnic_dev_open_done(vdev, done);
		if (ret == 0)
			break;

		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
	} while (nretries--);

	return ret;
} /* end of snic_vdev_open_done */
  247. /*
  248. * snic_add_host : registers scsi host with ML
  249. */
  250. static int
  251. snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
  252. {
  253. int ret = 0;
  254. ret = scsi_add_host(shost, &pdev->dev);
  255. if (ret) {
  256. SNIC_HOST_ERR(shost,
  257. "snic: scsi_add_host failed. %d\n",
  258. ret);
  259. return ret;
  260. }
  261. SNIC_BUG_ON(shost->work_q != NULL);
  262. snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
  263. shost->host_no);
  264. shost->work_q = create_singlethread_workqueue(shost->work_q_name);
  265. if (!shost->work_q) {
  266. SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
  267. ret = -ENOMEM;
  268. }
  269. return ret;
  270. } /* end of snic_add_host */
/*
 * snic_del_host : tears down the per-host work queue and unregisters the
 * scsi host. The work_q check doubles as an "is registered" test, so this
 * is safe to call on paths where snic_add_host() never succeeded.
 */
static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}
  280. int
  281. snic_get_state(struct snic *snic)
  282. {
  283. return atomic_read(&snic->state);
  284. }
  285. void
  286. snic_set_state(struct snic *snic, enum snic_state state)
  287. {
  288. SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
  289. snic_state_to_str(snic_get_state(snic)),
  290. snic_state_to_str(state));
  291. atomic_set(&snic->state, state);
  292. }
  293. /*
  294. * snic_probe : Initialize the snic interface.
  295. */
  296. static int
  297. snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  298. {
  299. struct Scsi_Host *shost;
  300. struct snic *snic;
  301. mempool_t *pool;
  302. unsigned long flags;
  303. u32 max_ios = 0;
  304. int ret, i;
  305. /* Device Information */
  306. SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
  307. pdev->vendor, pdev->device, pdev->subsystem_vendor,
  308. pdev->subsystem_device);
  309. SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
  310. pdev->bus->number, PCI_SLOT(pdev->devfn),
  311. PCI_FUNC(pdev->devfn));
  312. /*
  313. * Allocate SCSI Host and setup association between host, and snic
  314. */
  315. shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
  316. if (!shost) {
  317. SNIC_ERR("Unable to alloc scsi_host\n");
  318. ret = -ENOMEM;
  319. goto prob_end;
  320. }
  321. snic = shost_priv(shost);
  322. snic->shost = shost;
  323. snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
  324. shost->host_no);
  325. SNIC_HOST_INFO(shost,
  326. "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
  327. shost->host_no, snic, shost, pdev->bus->number,
  328. PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
  329. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  330. /* Per snic debugfs init */
  331. ret = snic_stats_debugfs_init(snic);
  332. if (ret) {
  333. SNIC_HOST_ERR(snic->shost,
  334. "Failed to initialize debugfs stats\n");
  335. snic_stats_debugfs_remove(snic);
  336. }
  337. #endif
  338. /* Setup PCI Resources */
  339. pci_set_drvdata(pdev, snic);
  340. snic->pdev = pdev;
  341. ret = pci_enable_device(pdev);
  342. if (ret) {
  343. SNIC_HOST_ERR(shost,
  344. "Cannot enable PCI Resources, aborting : %d\n",
  345. ret);
  346. goto err_free_snic;
  347. }
  348. ret = pci_request_regions(pdev, SNIC_DRV_NAME);
  349. if (ret) {
  350. SNIC_HOST_ERR(shost,
  351. "Cannot obtain PCI Resources, aborting : %d\n",
  352. ret);
  353. goto err_pci_disable;
  354. }
  355. pci_set_master(pdev);
  356. /*
  357. * Query PCI Controller on system for DMA addressing
  358. * limitation for the device. Try 43-bit first, and
  359. * fail to 32-bit.
  360. */
  361. ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
  362. if (ret) {
  363. ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  364. if (ret) {
  365. SNIC_HOST_ERR(shost,
  366. "No Usable DMA Configuration, aborting %d\n",
  367. ret);
  368. goto err_rel_regions;
  369. }
  370. ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  371. if (ret) {
  372. SNIC_HOST_ERR(shost,
  373. "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
  374. ret);
  375. goto err_rel_regions;
  376. }
  377. } else {
  378. ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
  379. if (ret) {
  380. SNIC_HOST_ERR(shost,
  381. "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
  382. ret);
  383. goto err_rel_regions;
  384. }
  385. }
  386. /* Map vNIC resources from BAR0 */
  387. if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
  388. SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
  389. ret = -ENODEV;
  390. goto err_rel_regions;
  391. }
  392. snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
  393. if (!snic->bar0.vaddr) {
  394. SNIC_HOST_ERR(shost,
  395. "Cannot memory map BAR0 res hdr aborting.\n");
  396. ret = -ENODEV;
  397. goto err_rel_regions;
  398. }
  399. snic->bar0.bus_addr = pci_resource_start(pdev, 0);
  400. snic->bar0.len = pci_resource_len(pdev, 0);
  401. SNIC_BUG_ON(snic->bar0.bus_addr == 0);
  402. /* Devcmd2 Resource Allocation and Initialization */
  403. snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
  404. if (!snic->vdev) {
  405. SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");
  406. ret = -ENODEV;
  407. goto err_iounmap;
  408. }
  409. ret = svnic_dev_cmd_init(snic->vdev, 0);
  410. if (ret) {
  411. SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);
  412. goto err_vnic_unreg;
  413. }
  414. ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
  415. if (ret) {
  416. SNIC_HOST_ERR(shost,
  417. "vNIC dev open failed, aborting. %d\n",
  418. ret);
  419. goto err_vnic_unreg;
  420. }
  421. ret = svnic_dev_init(snic->vdev, 0);
  422. if (ret) {
  423. SNIC_HOST_ERR(shost,
  424. "vNIC dev init failed. aborting. %d\n",
  425. ret);
  426. goto err_dev_close;
  427. }
  428. /* Get vNIC information */
  429. ret = snic_get_vnic_config(snic);
  430. if (ret) {
  431. SNIC_HOST_ERR(shost,
  432. "Get vNIC configuration failed, aborting. %d\n",
  433. ret);
  434. goto err_dev_close;
  435. }
  436. /* Configure Maximum Outstanding IO reqs */
  437. max_ios = snic->config.io_throttle_count;
  438. if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
  439. shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
  440. max_t(u32, SNIC_MIN_IO_REQ, max_ios));
  441. snic->max_tag_id = shost->can_queue;
  442. shost->max_lun = snic->config.luns_per_tgt;
  443. shost->max_id = SNIC_MAX_TARGET;
  444. shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/
  445. snic_get_res_counts(snic);
  446. /*
  447. * Assumption: Only MSIx is supported
  448. */
  449. ret = snic_set_intr_mode(snic);
  450. if (ret) {
  451. SNIC_HOST_ERR(shost,
  452. "Failed to set intr mode aborting. %d\n",
  453. ret);
  454. goto err_dev_close;
  455. }
  456. ret = snic_alloc_vnic_res(snic);
  457. if (ret) {
  458. SNIC_HOST_ERR(shost,
  459. "Failed to alloc vNIC resources aborting. %d\n",
  460. ret);
  461. goto err_clear_intr;
  462. }
  463. /* Initialize specific lists */
  464. INIT_LIST_HEAD(&snic->list);
  465. /*
  466. * spl_cmd_list for maintaining snic specific cmds
  467. * such as EXCH_VER_REQ, REPORT_TARGETS etc
  468. */
  469. INIT_LIST_HEAD(&snic->spl_cmd_list);
  470. spin_lock_init(&snic->spl_cmd_lock);
  471. /* initialize all snic locks */
  472. spin_lock_init(&snic->snic_lock);
  473. for (i = 0; i < SNIC_WQ_MAX; i++)
  474. spin_lock_init(&snic->wq_lock[i]);
  475. for (i = 0; i < SNIC_IO_LOCKS; i++)
  476. spin_lock_init(&snic->io_req_lock[i]);
  477. pool = mempool_create_slab_pool(2,
  478. snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
  479. if (!pool) {
  480. SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
  481. goto err_free_res;
  482. }
  483. snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;
  484. pool = mempool_create_slab_pool(2,
  485. snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
  486. if (!pool) {
  487. SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
  488. goto err_free_dflt_sgl_pool;
  489. }
  490. snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;
  491. pool = mempool_create_slab_pool(2,
  492. snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
  493. if (!pool) {
  494. SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
  495. goto err_free_max_sgl_pool;
  496. }
  497. snic->req_pool[SNIC_REQ_TM_CACHE] = pool;
  498. /* Initialize snic state */
  499. atomic_set(&snic->state, SNIC_INIT);
  500. atomic_set(&snic->ios_inflight, 0);
  501. /* Setup notification buffer area */
  502. ret = snic_notify_set(snic);
  503. if (ret) {
  504. SNIC_HOST_ERR(shost,
  505. "Failed to alloc notify buffer aborting. %d\n",
  506. ret);
  507. goto err_free_tmreq_pool;
  508. }
  509. spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
  510. list_add_tail(&snic->list, &snic_glob->snic_list);
  511. spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);
  512. snic_disc_init(&snic->disc);
  513. INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
  514. INIT_WORK(&snic->disc_work, snic_handle_disc);
  515. INIT_WORK(&snic->link_work, snic_handle_link);
  516. /* Enable all queues */
  517. for (i = 0; i < snic->wq_count; i++)
  518. svnic_wq_enable(&snic->wq[i]);
  519. ret = svnic_dev_enable_wait(snic->vdev);
  520. if (ret) {
  521. SNIC_HOST_ERR(shost,
  522. "vNIC dev enable failed w/ error %d\n",
  523. ret);
  524. goto err_vdev_enable;
  525. }
  526. ret = snic_request_intr(snic);
  527. if (ret) {
  528. SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);
  529. goto err_req_intr;
  530. }
  531. for (i = 0; i < snic->intr_count; i++)
  532. svnic_intr_unmask(&snic->intr[i]);
  533. /* Get snic params */
  534. ret = snic_get_conf(snic);
  535. if (ret) {
  536. SNIC_HOST_ERR(shost,
  537. "Failed to get snic io config from FW w err %d\n",
  538. ret);
  539. goto err_get_conf;
  540. }
  541. /*
  542. * Initialization done with PCI system, hardware, firmware.
  543. * Add shost to SCSI
  544. */
  545. ret = snic_add_host(shost, pdev);
  546. if (ret) {
  547. SNIC_HOST_ERR(shost,
  548. "Adding scsi host Failed ... exiting. %d\n",
  549. ret);
  550. goto err_get_conf;
  551. }
  552. snic_set_state(snic, SNIC_ONLINE);
  553. ret = snic_disc_start(snic);
  554. if (ret) {
  555. SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
  556. ret);
  557. goto err_get_conf;
  558. }
  559. SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");
  560. return 0;
  561. err_get_conf:
  562. snic_free_all_untagged_reqs(snic);
  563. for (i = 0; i < snic->intr_count; i++)
  564. svnic_intr_mask(&snic->intr[i]);
  565. snic_free_intr(snic);
  566. err_req_intr:
  567. svnic_dev_disable(snic->vdev);
  568. err_vdev_enable:
  569. svnic_dev_notify_unset(snic->vdev);
  570. for (i = 0; i < snic->wq_count; i++) {
  571. int rc = 0;
  572. rc = svnic_wq_disable(&snic->wq[i]);
  573. if (rc) {
  574. SNIC_HOST_ERR(shost,
  575. "WQ Disable Failed w/ err = %d\n", rc);
  576. break;
  577. }
  578. }
  579. snic_del_host(snic->shost);
  580. err_free_tmreq_pool:
  581. mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);
  582. err_free_max_sgl_pool:
  583. mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);
  584. err_free_dflt_sgl_pool:
  585. mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);
  586. err_free_res:
  587. snic_free_vnic_res(snic);
  588. err_clear_intr:
  589. snic_clear_intr_mode(snic);
  590. err_dev_close:
  591. svnic_dev_close(snic->vdev);
  592. err_vnic_unreg:
  593. svnic_dev_unregister(snic->vdev);
  594. err_iounmap:
  595. snic_iounmap(snic);
  596. err_rel_regions:
  597. pci_release_regions(pdev);
  598. err_pci_disable:
  599. pci_disable_device(pdev);
  600. err_free_snic:
  601. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  602. snic_stats_debugfs_remove(snic);
  603. #endif
  604. scsi_host_put(shost);
  605. pci_set_drvdata(pdev, NULL);
  606. prob_end:
  607. SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
  608. pdev->bus->number, PCI_SLOT(pdev->devfn),
  609. PCI_FUNC(pdev->devfn));
  610. return ret;
  611. } /* end of snic_probe */
/*
 * snic_remove : invoked on unbinding the interface to cleanup the
 * resources allocated in snic_probe on initialization.
 *
 * Teardown order: stop new work (state + stop_link_events), flush the
 * global event queue, terminate discovery, quiesce the device via
 * snic_cleanup(), then unregister from the global list, SCSI, vNIC,
 * and PCI layers.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Drain any link/discovery work already queued. */
	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleanup
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (continuous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */
/* Driver-wide context: snic list, slab caches, event workqueue, debugfs. */
struct snic_global *snic_glob;
  670. /*
  671. * snic_global_data_init: Initialize SNIC Global Data
  672. * Notes: All the global lists, variables should be part of global data
  673. * this helps in debugging.
  674. */
  675. static int
  676. snic_global_data_init(void)
  677. {
  678. int ret = 0;
  679. struct kmem_cache *cachep;
  680. ssize_t len = 0;
  681. snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);
  682. if (!snic_glob) {
  683. SNIC_ERR("Failed to allocate Global Context.\n");
  684. ret = -ENOMEM;
  685. goto gdi_end;
  686. }
  687. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  688. /* Debugfs related Initialization */
  689. /* Create debugfs entries for snic */
  690. ret = snic_debugfs_init();
  691. if (ret < 0) {
  692. SNIC_ERR("Failed to create sysfs dir for tracing and stats.\n");
  693. snic_debugfs_term();
  694. /* continue even if it fails */
  695. }
  696. /* Trace related Initialization */
  697. /* Allocate memory for trace buffer */
  698. ret = snic_trc_init();
  699. if (ret < 0) {
  700. SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
  701. snic_trc_free();
  702. /* continue even if it fails */
  703. }
  704. #endif
  705. INIT_LIST_HEAD(&snic_glob->snic_list);
  706. spin_lock_init(&snic_glob->snic_list_lock);
  707. /* Create a cache for allocation of snic_host_req+default size ESGLs */
  708. len = sizeof(struct snic_req_info);
  709. len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
  710. cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
  711. SLAB_HWCACHE_ALIGN, NULL);
  712. if (!cachep) {
  713. SNIC_ERR("Failed to create snic default sgl slab\n");
  714. ret = -ENOMEM;
  715. goto err_dflt_req_slab;
  716. }
  717. snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;
  718. /* Create a cache for allocation of max size Extended SGLs */
  719. len = sizeof(struct snic_req_info);
  720. len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
  721. cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
  722. SLAB_HWCACHE_ALIGN, NULL);
  723. if (!cachep) {
  724. SNIC_ERR("Failed to create snic max sgl slab\n");
  725. ret = -ENOMEM;
  726. goto err_max_req_slab;
  727. }
  728. snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
  729. len = sizeof(struct snic_host_req);
  730. cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
  731. SLAB_HWCACHE_ALIGN, NULL);
  732. if (!cachep) {
  733. SNIC_ERR("Failed to create snic tm req slab\n");
  734. ret = -ENOMEM;
  735. goto err_tmreq_slab;
  736. }
  737. snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;
  738. /* snic_event queue */
  739. snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
  740. if (!snic_glob->event_q) {
  741. SNIC_ERR("snic event queue create failed\n");
  742. ret = -ENOMEM;
  743. goto err_eventq;
  744. }
  745. return ret;
  746. err_eventq:
  747. kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
  748. err_tmreq_slab:
  749. kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
  750. err_max_req_slab:
  751. kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
  752. err_dflt_req_slab:
  753. #ifdef CONFIG_SCSI_SNIC_DEBUG_FS
  754. snic_trc_free();
  755. snic_debugfs_term();
  756. #endif
  757. kfree(snic_glob);
  758. snic_glob = NULL;
  759. gdi_end:
  760. return ret;
  761. } /* end of snic_glob_init */
/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 *
 * Exact reverse of snic_global_data_init(): event queue first, then the
 * three request slab caches, then trace/debugfs, then the context itself.
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */
/* PCI driver glue: binds snic_probe/snic_remove to the Cisco sNIC id. */
static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};
  788. static int __init
  789. snic_init_module(void)
  790. {
  791. int ret = 0;
  792. #ifndef __x86_64__
  793. SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
  794. add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
  795. #endif
  796. SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);
  797. ret = snic_global_data_init();
  798. if (ret) {
  799. SNIC_ERR("Failed to Initialize Global Data.\n");
  800. return ret;
  801. }
  802. ret = pci_register_driver(&snic_driver);
  803. if (ret < 0) {
  804. SNIC_ERR("PCI driver register error\n");
  805. goto err_pci_reg;
  806. }
  807. return ret;
  808. err_pci_reg:
  809. snic_global_data_cleanup();
  810. return ret;
  811. }
/*
 * Module exit point: unbind from all devices (triggers snic_remove for
 * each), then release the global driver data.
 */
static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}
/* Module entry/exit hooks and metadata. */
module_init(snic_init_module);
module_exit(snic_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");