c2_cq.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/gfp.h>

#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"

#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))

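/*
 * Look up a CQ by its number in the device's qptr_array and take a
 * reference on it.  Returns NULL if the CQ has already been destroyed.
 */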
static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
{
        struct c2_cq *cq;
        unsigned long flags;

        spin_lock_irqsave(&c2dev->lock, flags);
        cq = c2dev->qptr_array[cqn];
        if (!cq) {
                spin_unlock_irqrestore(&c2dev->lock, flags);
                return NULL;
        }
        atomic_inc(&cq->refcount);
        spin_unlock_irqrestore(&c2dev->lock, flags);
        return cq;
}

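/*
 * Drop a reference taken by c2_cq_get().  The final put wakes anyone
 * waiting in c2_free_cq().
 */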
static void c2_cq_put(struct c2_cq *cq)
{
        if (atomic_dec_and_test(&cq->refcount))
                wake_up(&cq->wait);
}

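/*
 * Completion event handler: called when the adapter signals activity on
 * the message queue backing a CQ.  Invokes the consumer's completion
 * handler while holding a reference on the CQ.
 */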
void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
{
        struct c2_cq *cq;

        cq = c2_cq_get(c2dev, mq_index);
        if (!cq) {
                printk(KERN_WARNING "discarding events on destroyed CQN=%u\n",
                       mq_index);
                return;
        }

        (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
        c2_cq_put(cq);
}

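/*
 * Called while a QP is being destroyed: zero the qp_user_context of any
 * pending CQ messages that still reference the QP so that c2_poll_one()
 * will skip them.
 */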
void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
{
        struct c2_cq *cq;
        struct c2_mq *q;

        cq = c2_cq_get(c2dev, mq_index);
        if (!cq)
                return;

        spin_lock_irq(&cq->lock);
        q = &cq->mq;
        if (q && !c2_mq_empty(q)) {
                u16 priv = q->priv;
                struct c2wr_ce *msg;

                while (priv != be16_to_cpu(*q->shared)) {
                        msg = (struct c2wr_ce *)
                                (q->msg_pool.host + priv * q->msg_size);
                        if (msg->qp_user_context == (u64) (unsigned long) qp) {
                                msg->qp_user_context = (u64) 0;
                        }
                        priv = (priv + 1) % q->q_size;
                }
        }
        spin_unlock_irq(&cq->lock);
        c2_cq_put(cq);
}

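/* Map an adapter CQE status code onto the corresponding ib_wc_status. */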
static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
{
        switch (status) {
        case C2_OK:
                return IB_WC_SUCCESS;
        case CCERR_FLUSHED:
                return IB_WC_WR_FLUSH_ERR;
        case CCERR_BASE_AND_BOUNDS_VIOLATION:
                return IB_WC_LOC_PROT_ERR;
        case CCERR_ACCESS_VIOLATION:
                return IB_WC_LOC_ACCESS_ERR;
        case CCERR_TOTAL_LENGTH_TOO_BIG:
                return IB_WC_LOC_LEN_ERR;
        case CCERR_INVALID_WINDOW:
                return IB_WC_MW_BIND_ERR;
        default:
                return IB_WC_GENERAL_ERR;
        }
}

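/*
 * Pull one completion off the CQ message queue, skipping entries whose
 * QP has already been freed, and translate it into the caller's ib_wc.
 * Returns -EAGAIN when the queue is empty.
 */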
static inline int c2_poll_one(struct c2_dev *c2dev,
                              struct c2_cq *cq, struct ib_wc *entry)
{
        struct c2wr_ce *ce;
        struct c2_qp *qp;
        int is_recv = 0;

        ce = c2_mq_consume(&cq->mq);
        if (!ce) {
                return -EAGAIN;
        }

        /*
         * if the qp returned is null then this qp has already
         * been freed and we are unable to process the completion.
         * try pulling the next message
         */
        while ((qp =
                (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
                c2_mq_free(&cq->mq);
                ce = c2_mq_consume(&cq->mq);
                if (!ce)
                        return -EAGAIN;
        }

        entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
        entry->wr_id = ce->hdr.context;
        entry->qp = &qp->ibqp;
        entry->wc_flags = 0;
        entry->slid = 0;
        entry->sl = 0;
        entry->src_qp = 0;
        entry->dlid_path_bits = 0;
        entry->pkey_index = 0;

        switch (c2_wr_get_id(ce)) {
        case C2_WR_TYPE_SEND:
                entry->opcode = IB_WC_SEND;
                break;
        case C2_WR_TYPE_RDMA_WRITE:
                entry->opcode = IB_WC_RDMA_WRITE;
                break;
        case C2_WR_TYPE_RDMA_READ:
                entry->opcode = IB_WC_RDMA_READ;
                break;
        case C2_WR_TYPE_BIND_MW:
                entry->opcode = IB_WC_BIND_MW;
                break;
        case C2_WR_TYPE_RECV:
                entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
                entry->opcode = IB_WC_RECV;
                is_recv = 1;
                break;
        default:
                break;
        }

        /* consume the WQEs */
        if (is_recv)
                c2_mq_lconsume(&qp->rq_mq, 1);
        else
                c2_mq_lconsume(&qp->sq_mq,
                               be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);

        /* free the message */
        c2_mq_free(&cq->mq);

        return 0;
}

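/*
 * Verbs poll entry point: harvest up to num_entries completions under
 * the CQ lock and return how many were written to the entry array.
 */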
int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct c2_dev *c2dev = to_c2dev(ibcq->device);
        struct c2_cq *cq = to_c2cq(ibcq);
        unsigned long flags;
        int npolled, err;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = c2_poll_one(c2dev, cq, entry + npolled);
                if (err)
                        break;
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}

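/*
 * Verbs request-notify entry point: program the notification type in the
 * adapter-visible shared area and arm the CQ.  With
 * IB_CQ_REPORT_MISSED_EVENTS, returns nonzero if completions are already
 * pending on the message queue.
 */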
int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
        struct c2_mq_shared __iomem *shared;
        struct c2_cq *cq;
        unsigned long flags;
        int ret = 0;

        cq = to_c2cq(ibcq);
        shared = cq->mq.peer;

        if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
                writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
        else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
                writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
        else
                return -EINVAL;

        writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);

        /*
         * Now read back shared->armed to make the PCI
         * write synchronous.  This is necessary for
         * correct cq notification semantics.
         */
        readb(&shared->armed);

        if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
                spin_lock_irqsave(&cq->lock, flags);
                ret = !c2_mq_empty(&cq->mq);
                spin_unlock_irqrestore(&cq->lock, flags);
        }

        return ret;
}

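/* Free the DMA-coherent message pool backing a CQ's message queue. */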
static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
{
        dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
                          mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}

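/*
 * Allocate a DMA-coherent message pool for the CQ and initialize its
 * host-target MQ descriptor.  The MQ index and peer pointer are filled
 * in later, once the adapter has replied to the CQ create request.
 */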
static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
                           int msg_size)
{
        u8 *pool_start;

        pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
                                        &mq->host_dma, GFP_KERNEL);
        if (!pool_start)
                return -ENOMEM;

        c2_mq_rep_init(mq,
                       0,           /* index (currently unknown) */
                       q_size,
                       msg_size,
                       pool_start,
                       NULL,        /* peer (currently unknown) */
                       C2_MQ_HOST_TARGET);

        dma_unmap_addr_set(mq, mapping, mq->host_dma);

        return 0;
}

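/*
 * Create a CQ: allocate the shared pointer and host message pool, send a
 * CCWR_CQ_CREATE request to the adapter over the verbs request queue,
 * then map the adapter's shared area and publish the CQ in qptr_array
 * under the MQ index the adapter assigned.
 */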
int c2_init_cq(struct c2_dev *c2dev, int entries,
               struct c2_ucontext *ctx, struct c2_cq *cq)
{
        struct c2wr_cq_create_req wr;
        struct c2wr_cq_create_rep *reply;
        unsigned long peer_pa;
        struct c2_vq_req *vq_req;
        int err;

        might_sleep();

        cq->ibcq.cqe = entries - 1;
        cq->is_kernel = !ctx;

        /* Allocate a shared pointer */
        cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
                                      &cq->mq.shared_dma, GFP_KERNEL);
        if (!cq->mq.shared)
                return -ENOMEM;

        /* Allocate pages for the message pool */
        err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
        if (err)
                goto bail0;

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                err = -ENOMEM;
                goto bail1;
        }

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_CQ_CREATE);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.msg_size = cpu_to_be32(cq->mq.msg_size);
        wr.depth = cpu_to_be32(cq->mq.q_size);
        wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
        wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
        wr.user_context = (u64) (unsigned long) (cq);

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail2;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail2;

        reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
        if (!reply) {
                err = -ENOMEM;
                goto bail2;
        }

        if ((err = c2_errno(reply)) != 0)
                goto bail3;

        cq->adapter_handle = reply->cq_handle;
        cq->mq.index = be32_to_cpu(reply->mq_index);

        peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
        cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
        if (!cq->mq.peer) {
                err = -ENOMEM;
                goto bail3;
        }

        vq_repbuf_free(c2dev, reply);
        vq_req_free(c2dev, vq_req);

        spin_lock_init(&cq->lock);
        atomic_set(&cq->refcount, 1);
        init_waitqueue_head(&cq->wait);

        /*
         * Use the MQ index allocated by the adapter to
         * store the CQ in the qptr_array
         */
        cq->cqn = cq->mq.index;
        c2dev->qptr_array[cq->cqn] = cq;

        return 0;

bail3:
        vq_repbuf_free(c2dev, reply);
bail2:
        vq_req_free(c2dev, vq_req);
bail1:
        c2_free_cq_buf(c2dev, &cq->mq);
bail0:
        c2_free_mqsp(cq->mq.shared);

        return err;
}

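/*
 * Destroy a CQ: remove it from qptr_array, wait for all references to be
 * dropped, ask the adapter to destroy its side with CCWR_CQ_DESTROY, and
 * free the host message pool for kernel CQs.
 */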
void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
{
        int err;
        struct c2_vq_req *vq_req;
        struct c2wr_cq_destroy_req wr;
        struct c2wr_cq_destroy_rep *reply;

        might_sleep();

        /* Clear CQ from the qptr array */
        spin_lock_irq(&c2dev->lock);
        c2dev->qptr_array[cq->mq.index] = NULL;
        atomic_dec(&cq->refcount);
        spin_unlock_irq(&c2dev->lock);

        wait_event(cq->wait, !atomic_read(&cq->refcount));

        vq_req = vq_req_alloc(c2dev);
        if (!vq_req) {
                goto bail0;
        }

        memset(&wr, 0, sizeof(wr));
        c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
        wr.hdr.context = (unsigned long) vq_req;
        wr.rnic_handle = c2dev->adapter_handle;
        wr.cq_handle = cq->adapter_handle;

        vq_req_get(c2dev, vq_req);

        err = vq_send_wr(c2dev, (union c2wr *) &wr);
        if (err) {
                vq_req_put(c2dev, vq_req);
                goto bail1;
        }

        err = vq_wait_for_reply(c2dev, vq_req);
        if (err)
                goto bail1;

        reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
        if (reply)
                vq_repbuf_free(c2dev, reply);

bail1:
        vq_req_free(c2dev, vq_req);
bail0:
        if (cq->is_kernel) {
                c2_free_cq_buf(c2dev, &cq->mq);
        }

        return;
}