/* libsrp.c - SCSI RDMA Protocol library functions (ibmvscsi target) */
  1. /*******************************************************************************
  2. * SCSI RDMA Protocol lib functions
  3. *
  4. * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
  5. * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. ***********************************************************************/
  18. #define pr_fmt(fmt) "libsrp: " fmt
  19. #include <linux/printk.h>
  20. #include <linux/err.h>
  21. #include <linux/slab.h>
  22. #include <linux/kfifo.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/dma-mapping.h>
  25. #include <linux/module.h>
  26. #include <scsi/srp.h>
  27. #include <target/target_core_base.h>
  28. #include "libsrp.h"
  29. #include "ibmvscsi_tgt.h"
  30. static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
  31. struct srp_buf **ring)
  32. {
  33. struct iu_entry *iue;
  34. int i;
  35. q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
  36. if (!q->pool)
  37. return -ENOMEM;
  38. q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
  39. if (!q->items)
  40. goto free_pool;
  41. spin_lock_init(&q->lock);
  42. kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));
  43. for (i = 0, iue = q->items; i < max; i++) {
  44. kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
  45. iue->sbuf = ring[i];
  46. iue++;
  47. }
  48. return 0;
  49. free_pool:
  50. kfree(q->pool);
  51. return -ENOMEM;
  52. }
/*
 * srp_iu_pool_free() - release the memory allocated by srp_iu_pool_alloc()
 * @q: queue whose entry array (items) and fifo backing store (pool) are freed
 */
static void srp_iu_pool_free(struct srp_queue *q)
{
	kfree(q->items);
	kfree(q->pool);
}
  58. static struct srp_buf **srp_ring_alloc(struct device *dev,
  59. size_t max, size_t size)
  60. {
  61. struct srp_buf **ring;
  62. int i;
  63. ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
  64. if (!ring)
  65. return NULL;
  66. for (i = 0; i < max; i++) {
  67. ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
  68. if (!ring[i])
  69. goto out;
  70. ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
  71. GFP_KERNEL);
  72. if (!ring[i]->buf)
  73. goto out;
  74. }
  75. return ring;
  76. out:
  77. for (i = 0; i < max && ring[i]; i++) {
  78. if (ring[i]->buf) {
  79. dma_free_coherent(dev, size, ring[i]->buf,
  80. ring[i]->dma);
  81. }
  82. kfree(ring[i]);
  83. }
  84. kfree(ring);
  85. return NULL;
  86. }
  87. static void srp_ring_free(struct device *dev, struct srp_buf **ring,
  88. size_t max, size_t size)
  89. {
  90. int i;
  91. for (i = 0; i < max; i++) {
  92. dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
  93. kfree(ring[i]);
  94. }
  95. kfree(ring);
  96. }
  97. int srp_target_alloc(struct srp_target *target, struct device *dev,
  98. size_t nr, size_t iu_size)
  99. {
  100. int err;
  101. spin_lock_init(&target->lock);
  102. target->dev = dev;
  103. target->srp_iu_size = iu_size;
  104. target->rx_ring_size = nr;
  105. target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
  106. if (!target->rx_ring)
  107. return -ENOMEM;
  108. err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
  109. if (err)
  110. goto free_ring;
  111. dev_set_drvdata(target->dev, target);
  112. return 0;
  113. free_ring:
  114. srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
  115. return -ENOMEM;
  116. }
/*
 * srp_target_free() - tear down everything srp_target_alloc() created
 * @target: target to destroy
 *
 * Clears the device drvdata first so no new lookup can find the target,
 * then frees the receive ring and the IU pool.
 */
void srp_target_free(struct srp_target *target)
{
	dev_set_drvdata(target->dev, NULL);
	srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
		      target->srp_iu_size);
	srp_iu_pool_free(&target->iu_queue);
}
/*
 * srp_iu_get() - take a free IU entry from the target's pool
 * @target: target whose iu_queue fifo is popped (under its spinlock)
 *
 * Return: an initialized iu_entry (target set, flags cleared), or NULL if
 * the fifo did not yield a full pointer (WARNs once) or yielded NULL.
 */
struct iu_entry *srp_iu_get(struct srp_target *target)
{
	struct iu_entry *iue = NULL;

	/* Must read exactly one pointer; a short read means fifo corruption. */
	if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
			     sizeof(void *),
			     &target->iu_queue.lock) != sizeof(void *)) {
		WARN_ONCE(1, "unexpected fifo state");
		return NULL;
	}
	if (!iue)
		return iue;
	iue->target = target;
	iue->flags = 0;
	return iue;
}
/*
 * srp_iu_put() - return an IU entry to its target's free pool
 * @iue: entry to release; pushed back onto the locked fifo
 */
void srp_iu_put(struct iu_entry *iue)
{
	kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
			sizeof(void *), &iue->target->iu_queue.lock);
}
/*
 * srp_direct_data() - transfer data described by one direct descriptor
 * @cmd:      command whose se_cmd scatterlist holds the local data
 * @md:       SRP direct buffer descriptor supplied by the initiator
 * @dir:      transfer direction
 * @rdma_io:  callback performing the actual RDMA transfer
 * @dma_map:  nonzero to dma_map/unmap the scatterlist around the transfer
 * @ext_desc: unused here; kept for symmetry with srp_indirect_data()
 *
 * Return: the rdma_io() result.  NOTE(review): a dma_map_sg() failure is
 * logged and returns 0, not a negative errno — callers see "0 bytes".
 */
static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
			   enum dma_data_direction dir, srp_rdma_t rdma_io,
			   int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct scatterlist *sg = NULL;
	int err, nsg = 0, len;

	if (dma_map) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;

		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("fail to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			return 0;
		}
		/* Clamp to the smaller of local data and remote descriptor. */
		len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
	} else {
		len = be32_to_cpu(md->len);
	}

	err = rdma_io(cmd, sg, nsg, md, 1, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

	return err;
}
/*
 * srp_indirect_data() - transfer data described by an indirect descriptor
 * @cmd:      command whose se_cmd scatterlist holds the local data
 * @srp_cmd:  SRP command carrying the descriptor counts
 * @id:       indirect buffer descriptor (table descriptor + partial list)
 * @dir:      transfer direction
 * @rdma_io:  callback performing the actual RDMA transfer
 * @dma_map:  nonzero to dma_map/unmap the scatterlist around the transfer
 * @ext_desc: nonzero if fetching an external descriptor table is allowed
 *
 * If the full descriptor list is embedded in the command, it is used
 * directly; otherwise the table is fetched from the initiator by RDMA
 * into a temporary coherent buffer first.
 *
 * Return: 0 on success or a negative errno.
 */
static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
			     struct srp_indirect_buf *id,
			     enum dma_data_direction dir, srp_rdma_t rdma_io,
			     int dma_map, int ext_desc)
{
	struct iu_entry *iue = NULL;
	struct srp_direct_buf *md = NULL;
	struct scatterlist dummy, *sg = NULL;
	dma_addr_t token = 0;	/* nonzero only when md was dma-allocated */
	int err = 0;
	int nmd, nsg = 0, len;

	if (dma_map || ext_desc) {
		iue = cmd->iue;
		sg = cmd->se_cmd.t_data_sg;
	}

	/* Number of direct descriptors the table claims to contain. */
	nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

	/* Entire descriptor list already embedded in the command? */
	if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
	    (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
		md = &id->desc_list[0];
		goto rdma;
	}

	if (ext_desc && dma_map) {
		/* Fetch the external descriptor table from the initiator. */
		md = dma_alloc_coherent(iue->target->dev,
					be32_to_cpu(id->table_desc.len),
					&token, GFP_KERNEL);
		if (!md) {
			pr_err("Can't get dma memory %u\n",
			       be32_to_cpu(id->table_desc.len));
			return -ENOMEM;
		}

		sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
		sg_dma_address(&dummy) = token;
		sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
		err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
			      be32_to_cpu(id->table_desc.len));
		if (err) {
			pr_err("Error copying indirect table %d\n", err);
			goto free_mem;
		}
	} else {
		pr_err("This command uses external indirect buffer\n");
		return -EINVAL;
	}

rdma:
	if (dma_map) {
		nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
				 DMA_BIDIRECTIONAL);
		if (!nsg) {
			pr_err("fail to map %p %d\n", iue,
			       cmd->se_cmd.t_data_nents);
			err = -EIO;
			goto free_mem;
		}
		/* Clamp to the smaller of local data and remote total. */
		len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
	} else {
		len = be32_to_cpu(id->len);
	}

	err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);

	if (dma_map)
		dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
	/* Free the fetched table only if we allocated it above. */
	if (token && dma_map) {
		dma_free_coherent(iue->target->dev,
				  be32_to_cpu(id->table_desc.len), md, token);
	}

	return err;
}
  237. static int data_out_desc_size(struct srp_cmd *cmd)
  238. {
  239. int size = 0;
  240. u8 fmt = cmd->buf_fmt >> 4;
  241. switch (fmt) {
  242. case SRP_NO_DATA_DESC:
  243. break;
  244. case SRP_DATA_DESC_DIRECT:
  245. size = sizeof(struct srp_direct_buf);
  246. break;
  247. case SRP_DATA_DESC_INDIRECT:
  248. size = sizeof(struct srp_indirect_buf) +
  249. sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
  250. break;
  251. default:
  252. pr_err("client error. Invalid data_out_format %x\n", fmt);
  253. break;
  254. }
  255. return size;
  256. }
  257. /*
  258. * TODO: this can be called multiple times for a single command if it
  259. * has very long data.
  260. */
  261. int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
  262. srp_rdma_t rdma_io, int dma_map, int ext_desc)
  263. {
  264. struct srp_direct_buf *md;
  265. struct srp_indirect_buf *id;
  266. enum dma_data_direction dir;
  267. int offset, err = 0;
  268. u8 format;
  269. if (!cmd->se_cmd.t_data_nents)
  270. return 0;
  271. offset = srp_cmd->add_cdb_len & ~3;
  272. dir = srp_cmd_direction(srp_cmd);
  273. if (dir == DMA_FROM_DEVICE)
  274. offset += data_out_desc_size(srp_cmd);
  275. if (dir == DMA_TO_DEVICE)
  276. format = srp_cmd->buf_fmt >> 4;
  277. else
  278. format = srp_cmd->buf_fmt & ((1U << 4) - 1);
  279. switch (format) {
  280. case SRP_NO_DATA_DESC:
  281. break;
  282. case SRP_DATA_DESC_DIRECT:
  283. md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
  284. err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
  285. break;
  286. case SRP_DATA_DESC_INDIRECT:
  287. id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
  288. err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
  289. ext_desc);
  290. break;
  291. default:
  292. pr_err("Unknown format %d %x\n", dir, format);
  293. err = -EINVAL;
  294. }
  295. return err;
  296. }
  297. u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
  298. {
  299. struct srp_direct_buf *md;
  300. struct srp_indirect_buf *id;
  301. u64 len = 0;
  302. uint offset = cmd->add_cdb_len & ~3;
  303. u8 fmt;
  304. if (dir == DMA_TO_DEVICE) {
  305. fmt = cmd->buf_fmt >> 4;
  306. } else {
  307. fmt = cmd->buf_fmt & ((1U << 4) - 1);
  308. offset += data_out_desc_size(cmd);
  309. }
  310. switch (fmt) {
  311. case SRP_NO_DATA_DESC:
  312. break;
  313. case SRP_DATA_DESC_DIRECT:
  314. md = (struct srp_direct_buf *)(cmd->add_data + offset);
  315. len = be32_to_cpu(md->len);
  316. break;
  317. case SRP_DATA_DESC_INDIRECT:
  318. id = (struct srp_indirect_buf *)(cmd->add_data + offset);
  319. len = be32_to_cpu(id->len);
  320. break;
  321. default:
  322. pr_err("invalid data format %x\n", fmt);
  323. break;
  324. }
  325. return len;
  326. }
/*
 * srp_get_desc_table() - derive direction and data length from a SRP command
 * @srp_cmd:  command to inspect
 * @dir:      out: DMA_NONE / DMA_FROM_DEVICE / DMA_TO_DEVICE
 * @data_len: out: descriptor length in host order, 0 if no descriptor
 *
 * Return: always 0 (rc is reserved for future validation failures).
 */
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
		       u64 *data_len)
{
	struct srp_indirect_buf *idb;
	struct srp_direct_buf *db;
	uint add_cdb_offset;
	int rc;

	/*
	 * The pointer computations below will only be compiled correctly
	 * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
	 * whether srp_cmd::add_data has been declared as a byte pointer.
	 */
	BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
		     && !__same_type(srp_cmd->add_data[0], (u8)0));

	BUG_ON(!dir);
	BUG_ON(!data_len);

	rc = 0;
	*data_len = 0;

	/* Lower nibble = data-in format, upper nibble = data-out format. */
	*dir = DMA_NONE;
	if (srp_cmd->buf_fmt & 0xf)
		*dir = DMA_FROM_DEVICE;
	else if (srp_cmd->buf_fmt >> 4)
		*dir = DMA_TO_DEVICE;

	add_cdb_offset = srp_cmd->add_cdb_len & ~3;
	if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
	    ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
		db = (struct srp_direct_buf *)(srp_cmd->add_data
					       + add_cdb_offset);
		*data_len = be32_to_cpu(db->len);
	} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
		   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
		idb = (struct srp_indirect_buf *)(srp_cmd->add_data
						  + add_cdb_offset);
		*data_len = be32_to_cpu(idb->len);
	}
	return rc;
}
/* Module metadata. */
MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");