/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
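
/*
 * With this shorthand, OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST,
 * the UC flavour of the corresponding IB opcode.
 */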

/**
 * qib_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_uc_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 hwords;
	u32 bth0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}
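
	/*
	 * If the destination requires a GRH, the 40-byte global route
	 * header sits between the LRH and the BTH, so a different
	 * header layout is used.
	 */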
	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Get the next send request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
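
	/*
	 * Send state machine: the default case starts a new WQE, while
	 * the FIRST/MIDDLE states continue a message already in
	 * progress, one PMTU-sized fragment per call.
	 */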
	switch (qp->s_state) {
	default:
		if (!(ib_qib_state_ops[qp->state] &
		    QIB_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		if (qp->s_cur == qp->s_head)
			goto bail;
		/*
		 * Start a new request.
		 */
		wqe->psn = qp->s_next_psn;
		qp->s_psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
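		/*
		 * Messages longer than one PMTU are segmented into
		 * FIRST/MIDDLE/LAST packets; anything that fits goes out
		 * as a single ONLY packet.  For example, a 4096-byte
		 * SEND over a 2048-byte path MTU becomes SEND_FIRST
		 * followed by SEND_LAST.
		 */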
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
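			/*
			 * RDMA writes carry an RETH (remote virtual
			 * address, rkey and total length) after the BTH
			 * so the responder knows where the data lands.
			 */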
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
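
	/*
	 * Common tail for every packet built above: account for the
	 * payload going out, record the header length and current SGE,
	 * and let qib_make_ruc_header() build the LRH/BTH with the
	 * opcode in the top byte of bth0.  UC has no acknowledgements,
	 * so the PSN simply advances and nothing is ever retransmitted.
	 */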
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			    qp->s_next_psn++ & QIB_PSN_MASK);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/**
 * qib_uc_rcv - handle an incoming UC packet
 * @ibp: the port the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
	}
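
	/*
	 * The opcode lives in the top byte of the first BTH word.
	 * qib_ruc_check_hdr() vets the packet's source against the
	 * QP's address information before anything else is trusted.
	 */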
	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;

	/* Compare the PSN against the expected PSN. */
	if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else
			while (qp->r_sge.num_sge) {
				atomic_dec(&qp->r_sge.sge.mr->refcount);
				if (--qp->r_sge.num_sge)
					qp->r_sge.sge = *qp->r_sge.sg_list++;
			}
		qp->r_state = OP(SEND_LAST);
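		/*
		 * Resynchronize on the new PSN: only a packet that can
		 * start a message (a FIRST or ONLY opcode) is accepted
		 * here; anything else is dropped.
		 */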
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			goto drop;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}
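
	/*
	 * The first packet received while the QP is still in RTR marks
	 * the connection as established; IB_EVENT_COMM_EST tells the
	 * consumer it can move the QP to RTS.
	 */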
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
		qp->r_flags |= QIB_R_COMM_EST;
		if (qp->ibqp.event_handler) {
			struct ib_event ev;

			ev.device = qp->ibqp.device;
			ev.element.qp = &qp->ibqp;
			ev.event = IB_EVENT_COMM_EST;
			qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
		}
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			qp->r_sge = qp->s_rdma_read_sge;
		else {
			ret = qib_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* FIRST/MIDDLE packets must carry exactly one PMTU of payload. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto rewind;
		/* ...and must not overrun the posted receive length. */
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		qib_copy_sge(&qp->r_sge, data, pmtu, 0);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;

	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
last_imm:
		qib_copy_sge(&qp->r_sge, data, tlen, 0);
		while (qp->s_rdma_read_sge.num_sge) {
			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
			if (--qp->s_rdma_read_sge.num_sge)
				qp->s_rdma_read_sge.sge =
					*qp->s_rdma_read_sge.sg_list++;
		}
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			     (ohdr->bth[0] &
			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
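		/*
		 * Parse the RETH to learn where the write lands and how
		 * long it is, then verify that the rkey grants remote
		 * write access to that range.
		 */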
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			/* Zero-length write: nothing to map. */
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* FIRST/MIDDLE packets must carry exactly one PMTU of payload. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		/* ...and must stay within the length advertised in the RETH. */
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, pmtu, 1);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
			while (qp->s_rdma_read_sge.num_sge) {
				atomic_dec(&qp->s_rdma_read_sge.sge.mr->
					   refcount);
				if (--qp->s_rdma_read_sge.num_sge)
					qp->s_rdma_read_sge.sge =
						*qp->s_rdma_read_sge.sg_list++;
			}
		else {
			ret = qib_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		qib_copy_sge(&qp->r_sge, data, tlen, 1);
		while (qp->r_sge.num_sge) {
			atomic_dec(&qp->r_sge.sge.mr->refcount);
			if (--qp->r_sge.num_sge)
				qp->r_sge.sge = *qp->r_sge.sg_list++;
		}
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
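
	/*
	 * The packet was accepted: advance the expected PSN and record
	 * the opcode so the next packet can be sequence-checked.
	 */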
	qp->r_psn++;
	qp->r_state = opcode;
	return;
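
	/*
	 * Error exits: "rewind" marks the SGE state for reuse before
	 * dropping, "drop" only counts the packet, and "op_err" reports
	 * a local QP operation error via qib_rc_error().
	 */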
rewind:
	set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->n_pkt_drops++;
	return;

op_err:
	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	return;
}