ib_frmr.c

/*
 * Copyright (c) 2016 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"

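/* Pick the 8K or 1M MR pool based on the number of pages, try to reuse a
 * pooled MR first, and otherwise allocate a new rds_ib_mr backed by an
 * IB_MR_TYPE_MEM_REG memory region.
 */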
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
					   int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	frmr = &ibmr->u.frmr;
	frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
			       pool->fmr_attr.max_pages);
	if (IS_ERR(frmr->mr)) {
		pr_warn("RDS/IB: %s failed to allocate MR", __func__);
		err = PTR_ERR(frmr->mr);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	if (atomic_read(&pool->item_count) > pool->max_items_soft)
		pool->max_items_soft = pool->max_items;

	frmr->fr_state = FRMR_IS_FREE;
	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

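/* Return an FRMR to its pool: MRs flagged for dropping go on the drop list,
 * reusable ones on the free list. Once enough pages are pinned or enough
 * MRs are dirty, the pool's flush worker is scheduled.
 */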
static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (drop)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
}

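/* Map the ibmr's scatterlist into the MR and post an IB_WR_REG_MR work
 * request. The loop on i_fastreg_wrs spins until a fast-registration WR
 * credit is available before posting.
 */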
static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct ib_send_wr *failed_wr;
	struct ib_reg_wr reg_wr;
	int ret, off = 0;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
				&off, PAGE_SIZE);
	if (unlikely(ret != ibmr->sg_len))
		return ret < 0 ? ret : -EINVAL;

	/* Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR. The key used is a rolling 8bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
	frmr->fr_state = FRMR_IS_INUSE;

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;

	failed_wr = &reg_wr.wr;
	ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, &failed_wr);
	WARN_ON(failed_wr != &reg_wr.wr);
	if (unlikely(ret)) {
		/* Failure here can be because of -ENOMEM as well */
		frmr->fr_state = FRMR_IS_STALE;
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		if (printk_ratelimit())
			pr_warn("RDS/IB: %s returned error(%d)\n",
				__func__, ret);
	}
	return ret;
}

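/* DMA-map the caller's scatterlist into the ibmr and register it with the
 * HCA. Only the first entry may start, and only the last entry may end, on
 * a non-page boundary; anything else is rejected with -EINVAL.
 */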
static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
			   struct rds_ib_mr_pool *pool,
			   struct rds_ib_mr *ibmr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	int i;
	u32 len;
	int ret = 0;

	/* Tear down any old mapping state in the ibmr and fill it with
	 * the new sg values.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = sg;
	ibmr->sg_len = sg_len;
	ibmr->sg_dma_len = 0;
	frmr->sg_byte_len = 0;
	WARN_ON(ibmr->sg_dma_len);

	ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
					 DMA_BIDIRECTIONAL);
	if (unlikely(!ibmr->sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	frmr->sg_byte_len = 0;
	frmr->dma_npages = 0;
	len = 0;

	ret = -EINVAL;
	for (i = 0; i < ibmr->sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);

		frmr->sg_byte_len += dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < ibmr->sg_dma_len - 1)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		len += dma_len;
	}
	frmr->dma_npages += len >> PAGE_SHIFT;

	if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
		ret = -EMSGSIZE;
		goto out_unmap;
	}

	ret = rds_ib_post_reg_frmr(ibmr);
	if (ret)
		goto out_unmap;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

	return ret;

out_unmap:
	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
			DMA_BIDIRECTIONAL);
	ibmr->sg_dma_len = 0;
	return ret;
}

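/* Post an IB_WR_LOCAL_INV work request to invalidate the rkey of an in-use
 * FRMR. Completion is reported through rds_ib_mr_cqe_handler().
 */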
static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
	struct ib_send_wr *s_wr, *failed_wr;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
	int ret = -EINVAL;

	if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
		goto out;

	if (frmr->fr_state != FRMR_IS_INUSE)
		goto out;

	while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
		cpu_relax();
	}

	frmr->fr_inv = true;
	s_wr = &frmr->fr_wr;

	memset(s_wr, 0, sizeof(*s_wr));
	s_wr->wr_id = (unsigned long)(void *)ibmr;
	s_wr->opcode = IB_WR_LOCAL_INV;
	s_wr->ex.invalidate_rkey = frmr->mr->rkey;
	s_wr->send_flags = IB_SEND_SIGNALED;

	failed_wr = s_wr;
	ret = ib_post_send(i_cm_id->qp, s_wr, &failed_wr);
	WARN_ON(failed_wr != s_wr);
	if (unlikely(ret)) {
		frmr->fr_state = FRMR_IS_STALE;
		frmr->fr_inv = false;
		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
		goto out;
	}
out:
	return ret;
}

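/* Completion handler for both registration and invalidation WRs. A failed
 * completion marks the FRMR stale and, if the connection is still up,
 * triggers a reconnect via rds_ib_conn_error(); a completed invalidation
 * returns the MR to the FRMR_IS_FREE state.
 */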
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (wc->status != IB_WC_SUCCESS) {
		frmr->fr_state = FRMR_IS_STALE;
		if (rds_conn_up(ic->conn))
			rds_ib_conn_error(ic->conn,
					  "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
					  &ic->conn->c_laddr,
					  &ic->conn->c_faddr,
					  wc->status,
					  ib_wc_status_msg(wc->status),
					  wc->vendor_err);
	}

	if (frmr->fr_inv) {
		frmr->fr_state = FRMR_IS_FREE;
		frmr->fr_inv = false;
		atomic_inc(&ic->i_fastreg_wrs);
	} else {
		atomic_inc(&ic->i_fastunreg_wrs);
	}
}

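/* Called from the MR pool flush path: invalidate every MR on @list that
 * still has a DMA mapping, tear the mappings down, and free MRs until
 * @goal is reached. Stale MRs are always freed; in-use MRs are never freed.
 */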
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
		       unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_frmr *frmr;
	int ret = 0;
	unsigned int freed = *nfreed;

	/* Post a local invalidate for every MR that is still mapped */
	list_for_each_entry(ibmr, list, unmap_list) {
		if (ibmr->sg_dma_len)
			ret |= rds_ib_post_inv(ibmr);
	}
	if (ret)
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		*unpinned += ibmr->sg_len;
		frmr = &ibmr->u.frmr;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
			/* Don't de-allocate if the MR is not free yet */
			if (frmr->fr_state == FRMR_IS_INUSE)
				continue;

			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);

			list_del(&ibmr->unmap_list);
			if (frmr->mr)
				ib_dereg_mr(frmr->mr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

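/* Allocate and register an FRMR for @sg. MRs that come back from the pool
 * in a non-free state are dropped and another one is allocated; on success
 * the rkey is returned through @key.
 */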
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
				  struct rds_ib_connection *ic,
				  struct scatterlist *sg,
				  unsigned long nents, u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int ret;

	do {
		if (ibmr)
			rds_ib_free_frmr(ibmr, true);
		ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
		if (IS_ERR(ibmr))
			return ibmr;
		frmr = &ibmr->u.frmr;
	} while (frmr->fr_state != FRMR_IS_FREE);

	ibmr->ic = ic;
	ibmr->device = rds_ibdev;
	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
	if (ret == 0) {
		*key = frmr->mr->rkey;
	} else {
		rds_ib_free_frmr(ibmr, false);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}

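/* Queue an FRMR for lazy release by the pool: stale MRs go on the drop
 * list, everything else on the free list.
 */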
void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (frmr->fr_state == FRMR_IS_STALE)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}