qib_ruc.c

/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01: .01 */
	20,	/* 02: .02 */
	30,	/* 03: .03 */
	40,	/* 04: .04 */
	60,	/* 05: .06 */
	80,	/* 06: .08 */
	120,	/* 07: .12 */
	160,	/* 08: .16 */
	240,	/* 09: .24 */
	320,	/* 0A: .32 */
	480,	/* 0B: .48 */
	640,	/* 0C: .64 */
	960,	/* 0D: .96 */
	1280,	/* 0E: 1.28 */
	1920,	/* 0F: 1.92 */
	2560,	/* 10: 2.56 */
	3840,	/* 11: 3.84 */
	5120,	/* 12: 5.12 */
	7680,	/* 13: 7.68 */
	10240,	/* 14: 10.24 */
	15360,	/* 15: 15.36 */
	20480,	/* 16: 20.48 */
	30720,	/* 17: 30.72 */
	40960,	/* 18: 40.96 */
	61440,	/* 19: 61.44 */
	81920,	/* 1A: 81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
	int i, j, ret;
	struct ib_wc wc;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;
	struct qib_sge_state *ss;

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
	ss = &qp->r_sge;
	ss->sg_list = qp->r_sg_list;
	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
				 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	ss->num_sge = j;
	ss->total_len = qp->r_len;
	ret = 1;
	goto bail;

bad_lkey:
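	/* Unwind: drop the MR references taken on the SGEs validated so far. */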
	while (j) {
		struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

		atomic_dec(&sge->mr->refcount);
	}
	ss->num_sge = 0;
	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	/* Signal solicited completion event. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}
/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct qib_rq *rq;
	struct qib_rwq *wq;
	struct qib_srq *srq;
	struct qib_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;
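	/*
	 * Receive WQEs come from the SRQ if one is attached,
	 * otherwise from the QP's own receive queue.
	 */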
	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ret = 0;
		goto unlock;
	}

	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	if (unlikely(tail == wq->head)) {
		ret = 0;
		goto unlock;
	}
	/* Make sure entry is read after head index is read. */
	smp_rmb();
	wqe = get_rwqe_ptr(rq, tail);
	/*
	 * Even though we update the tail index in memory, the verbs
	 * consumer is not supposed to post more entries until a
	 * completion is generated.
	 */
	if (++tail >= rq->size)
		tail = 0;
	wq->tail = tail;
	if (!wr_id_only && !qib_init_sge(qp, wqe)) {
		ret = -1;
		goto unlock;
	}
	qp->r_wr_id = wqe->wr_id;

	ret = 1;
	set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
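	/*
	 * If this is an SRQ with a limit armed, recompute the number of
	 * WQEs still queued and raise IB_EVENT_SRQ_LIMIT_REACHED once the
	 * count drops below that limit.
	 */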
	if (handler) {
		u32 n;

		/*
		 * Validate head pointer value and compute
		 * the number of remaining WQEs.
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
unlock:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
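/*
 * Return the source GUID for the given index: index 0 selects the port
 * GUID, while a nonzero index selects an entry from the port's GUID table.
 */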
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	} else
		return ibp->guids[index - 1];
}
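/*
 * A GID matches if the interface ID is equal and the subnet prefix is
 * either the port's prefix or the default GID prefix.
 */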
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
/*
 * qib_ruc_check_hdr - validate an incoming packet's LID, GID, and PKEY
 * against the QP's primary or alternate path, migrating the QP when the
 * alternate path matches and migration has been requested.
 *
 * Return 1 if the header does not match the QP state, 0 otherwise.
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		      int has_grh, struct qib_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->alt_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
				goto err;
		} else {
			if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
				goto err;
			guid = get_sguid(ibp,
					 qp->remote_ah_attr.grh.sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
			    qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
			    qp->remote_ah_attr.grh.dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
				      (u16)bth0,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      0, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_qp *qp;
	struct qib_swqe *wqe;
	struct qib_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
	    !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= QIB_S_BUSY;

again:
	if (sqp->s_last == sqp->s_head)
		goto clr_busy;
	wqe = get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof wc);
	send_status = IB_WC_SUCCESS;

	release = 1;
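	/* Initialize the sender's SGE state from the WQE's gather list. */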
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = qib_get_rwqe(qp, 0);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = qib_get_rwqe(qp, 1);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->wr.wr.rdma.remote_addr,
					  wqe->wr.wr.rdma.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->wr.wr.atomic.remote_addr,
					  wqe->wr.wr.atomic.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}
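	/*
	 * Copy the payload one contiguous chunk at a time; qib_copy_sge()
	 * advances the destination (qp->r_sge) while the loop below advances
	 * the sender's SGE.
	 */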
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				atomic_dec(&sge->mr->refcount);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
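	/* If required, release the MR references still held by the receive SGE state. */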
	if (release)
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}

	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
		goto clr_busy;
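	/* Arm the RNR retry timer using the responder's minimum RNR timeout. */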
	sqp->s_flags |= QIB_S_WAIT_RNR;
	sqp->s_timer.function = qib_rc_rnr_retry;
	sqp->s_timer.expires = jiffies +
		usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
	add_timer(&sqp->s_timer);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status = IB_WC_REM_INV_REQ_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	qib_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~QIB_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
	hdr->sgid.global.interface_id = grh->sgid_index ?
		ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
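	/*
	 * extra_bytes pads the payload out to a 4-byte boundary; nwords is
	 * the padded payload length in 32-bit words.
	 */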
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &qp->remote_ah_attr.grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
		qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
				       qp->remote_ah_attr.src_path_bits);
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
}
/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
	struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct qib_qp *qp);
	unsigned long flags;
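	/*
	 * If the destination LID (ignoring the LMC bits) is this port's own
	 * LID, the request never touches the wire; handle it as a loopback.
	 */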
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= QIB_S_BUSY;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				break;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
		}
	} while (make_req(qp));
}
/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	unsigned i;

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
		return;
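	/* Drop the MR references taken when the WQE was posted. */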
	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct qib_sge *sge = &wqe->sg_list[i];

		atomic_dec(&sge->mr->refcount);
	}
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

	/* See ch. 11.2.4.1 and 10.7.3.1 */
	if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof wc);
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		if (status == IB_WC_SUCCESS)
			wc.byte_len = wqe->length;
		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
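	/*
	 * Advance s_last past the completed WQE and drag s_acked, s_cur,
	 * and s_tail along if they pointed at the completed entry.
	 */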
	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}