/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"

/**
 * scif_recv_mark: Handle SCIF_MARK request
 * @msg: Interrupt message
 *
 * The peer has requested a mark.
 */
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        int mark = 0;
        int err;

        err = _scif_fence_mark(ep, &mark);
        if (err)
                msg->uop = SCIF_MARK_NACK;
        else
                msg->uop = SCIF_MARK_ACK;
        msg->payload[0] = ep->remote_ep;
        msg->payload[2] = mark;
        scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_MARK message.
 */
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_fence_info *fence_req =
                (struct scif_fence_info *)msg->payload[1];

        mutex_lock(&ep->rma_info.rma_lock);
        if (msg->uop == SCIF_MARK_ACK) {
                fence_req->state = OP_COMPLETED;
                fence_req->dma_mark = (int)msg->payload[2];
        } else {
                fence_req->state = OP_FAILED;
        }
        mutex_unlock(&ep->rma_info.rma_lock);
        complete(&fence_req->comp);
}

/**
 * scif_recv_wait: Handle SCIF_WAIT request
 * @msg: Interrupt message
 *
 * The peer has requested waiting on a fence.
 */
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_remote_fence_info *fence;

        /*
         * Allocate structure for remote fence information and
         * send a NACK if the allocation failed. The peer will
         * return ENOMEM upon receiving a NACK.
         */
        fence = kmalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence) {
                msg->payload[0] = ep->remote_ep;
                msg->uop = SCIF_WAIT_NACK;
                scif_nodeqp_send(ep->remote_dev, msg);
                return;
        }

        /* Prepare the fence request */
        memcpy(&fence->msg, msg, sizeof(struct scifmsg));
        INIT_LIST_HEAD(&fence->list);

        /* Insert to the global remote fence request list */
        mutex_lock(&scif_info.fencelock);
        atomic_inc(&ep->rma_info.fence_refcount);
        list_add_tail(&fence->list, &scif_info.fence);
        mutex_unlock(&scif_info.fencelock);

        schedule_work(&scif_info.misc_work);
}

/**
 * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_WAIT message.
 */
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_fence_info *fence_req =
                (struct scif_fence_info *)msg->payload[1];

        mutex_lock(&ep->rma_info.rma_lock);
        if (msg->uop == SCIF_WAIT_ACK)
                fence_req->state = OP_COMPLETED;
        else
                fence_req->state = OP_FAILED;
        mutex_unlock(&ep->rma_info.rma_lock);
        complete(&fence_req->comp);
}

/**
 * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a local offset.
 */
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        int err;

        err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
                               SCIF_WINDOW_SELF);
        if (err)
                msg->uop = SCIF_SIG_NACK;
        else
                msg->uop = SCIF_SIG_ACK;
        msg->payload[0] = ep->remote_ep;
        scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_remote: Handle SCIF_SIG_REMOTE request
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a remote offset.
 */
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        int err;

        err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
                               SCIF_WINDOW_PEER);
        if (err)
                msg->uop = SCIF_SIG_NACK;
        else
                msg->uop = SCIF_SIG_ACK;
        msg->payload[0] = ep->remote_ep;
        scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
 * @msg: Interrupt message
 *
 * The peer has responded to a signal request.
 */
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
        struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
        struct scif_fence_info *fence_req =
                (struct scif_fence_info *)msg->payload[3];

        mutex_lock(&ep->rma_info.rma_lock);
        if (msg->uop == SCIF_SIG_ACK)
                fence_req->state = OP_COMPLETED;
        else
                fence_req->state = OP_FAILED;
        mutex_unlock(&ep->rma_info.rma_lock);
        complete(&fence_req->comp);
}

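/*
 * scif_get_local_va: compute the kernel virtual address backing a
 * registered offset. The pinned page is indexed by the whole-page
 * distance of @off from the window base; the low bits of @off select
 * the byte within that page. For example, with a window registered at
 * offset 0x10000 and 4K pages, off 0x12010 resolves to pinned page 2,
 * byte 0x10.
 */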
static inline void *scif_get_local_va(off_t off, struct scif_window *window)
{
        struct page **pages = window->pinned_pages->pages;
        int page_nr = (off - window->offset) >> PAGE_SHIFT;
        off_t page_off = off & ~PAGE_MASK;

        return page_address(pages[page_nr]) + page_off;
}

static void scif_prog_signal_cb(void *arg)
{
        struct scif_status *status = arg;

        dma_pool_free(status->ep->remote_dev->signal_pool, status,
                      status->src_dma_addr);
}

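/*
 * _scif_prog_signal: write @val to DMA address @dst on the endpoint's
 * DMA channel. A zero-length memcpy flagged DMA_PREP_FENCE is submitted
 * first so the signal write is ordered behind all DMA previously
 * submitted on the channel. The value itself is then written either via
 * an immediate-data descriptor (X100, whose engine cannot do
 * byte-aligned copies) or by copying the val field of a dma_pool backed
 * scif_status, which the completion callback above frees.
 */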
static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct dma_chan *chan = ep->rma_info.dma_chan;
        struct dma_device *ddev = chan->device;
        bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
        struct dma_async_tx_descriptor *tx;
        struct scif_status *status = NULL;
        dma_addr_t src;
        dma_cookie_t cookie;
        int err;

        tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto alloc_fail;
        }
        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = (int)cookie;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto alloc_fail;
        }
        dma_async_issue_pending(chan);
        if (x100) {
                /*
                 * For X100 use the status descriptor to write the value to
                 * the destination.
                 */
                tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
        } else {
                status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
                                        &src);
                if (!status) {
                        err = -ENOMEM;
                        dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                                __func__, __LINE__, err);
                        goto alloc_fail;
                }
                status->val = val;
                status->src_dma_addr = src;
                status->ep = ep;
                src += offsetof(struct scif_status, val);
                tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
                                                  DMA_PREP_INTERRUPT);
        }
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto dma_fail;
        }
        if (!x100) {
                tx->callback = scif_prog_signal_cb;
                tx->callback_param = status;
        }
        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = -EIO;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto dma_fail;
        }
        dma_async_issue_pending(chan);
        return 0;
dma_fail:
        if (!x100)
                dma_pool_free(ep->remote_dev->signal_pool, status,
                              status->src_dma_addr);
alloc_fail:
        return err;
}

/*
 * scif_prog_signal:
 * @epd - Endpoint Descriptor
 * @offset - registered address to write @val to
 * @val - Value to be written at @offset
 * @type - Type of the window.
 *
 * Arrange to write a value to the registered offset after ensuring that the
 * offset provided is indeed valid.
 */
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
                     enum scif_window_type type)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct scif_window *window = NULL;
        struct scif_rma_req req;
        dma_addr_t dst_dma_addr;
        int err;

        mutex_lock(&ep->rma_info.rma_lock);
        req.out_window = &window;
        req.offset = offset;
        req.nr_bytes = sizeof(u64);
        req.prot = SCIF_PROT_WRITE;
        req.type = SCIF_WINDOW_SINGLE;
        if (type == SCIF_WINDOW_SELF)
                req.head = &ep->rma_info.reg_list;
        else
                req.head = &ep->rma_info.remote_reg_list;
        /* Does a valid window exist? */
        err = scif_query_window(&req);
        if (err) {
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
                goto unlock_ret;
        }

        if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
                u64 *dst_virt;

                if (type == SCIF_WINDOW_SELF)
                        dst_virt = scif_get_local_va(offset, window);
                else
                        dst_virt =
                        scif_get_local_va(offset, (struct scif_window *)
                                          window->peer_window);
                *dst_virt = val;
        } else {
                dst_dma_addr = __scif_off_to_dma_addr(window, offset);
                err = _scif_prog_signal(epd, dst_dma_addr, val);
        }
unlock_ret:
        mutex_unlock(&ep->rma_info.rma_lock);
        return err;
}

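/*
 * _scif_fence_wait: wait for DMA completion of the cookie encoded in
 * @mark. Marks obtained from the peer via scif_send_fence_mark() carry
 * the SCIF_REMOTE_FENCE flag; it is masked off here to recover the DMA
 * cookie before polling the channel that generated it.
 */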
static int _scif_fence_wait(scif_epd_t epd, int mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
        int err;

        /* Wait for DMA callback in scif_fence_mark_cb(..) */
        err = wait_event_interruptible_timeout(ep->rma_info.markwq,
                                               dma_async_is_tx_complete(
                                               ep->rma_info.dma_chan,
                                               cookie, NULL, NULL) ==
                                               DMA_COMPLETE,
                                               SCIF_NODE_ALIVE_TIMEOUT);
        if (!err)
                err = -ETIMEDOUT;
        else if (err > 0)
                err = 0;
        return err;
}

/**
 * scif_rma_handle_remote_fences:
 *
 * This routine services remote fence requests.
 */
void scif_rma_handle_remote_fences(void)
{
        struct list_head *item, *tmp;
        struct scif_remote_fence_info *fence;
        struct scif_endpt *ep;
        int mark, err;

        might_sleep();
        mutex_lock(&scif_info.fencelock);
        list_for_each_safe(item, tmp, &scif_info.fence) {
                fence = list_entry(item, struct scif_remote_fence_info,
                                   list);
                /* Remove fence from global list */
                list_del(&fence->list);

                /* Initiate the fence operation */
                ep = (struct scif_endpt *)fence->msg.payload[0];
                mark = fence->msg.payload[2];
                err = _scif_fence_wait(ep, mark);
                if (err)
                        fence->msg.uop = SCIF_WAIT_NACK;
                else
                        fence->msg.uop = SCIF_WAIT_ACK;
                fence->msg.payload[0] = ep->remote_ep;
                scif_nodeqp_send(ep->remote_dev, &fence->msg);
                kfree(fence);
                if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
                        schedule_work(&scif_info.misc_work);
        }
        mutex_unlock(&scif_info.fencelock);
}

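/*
 * _scif_send_fence: send a SCIF_MARK or SCIF_WAIT request to the peer
 * and wait for the matching (N)ACK. payload[1] carries the address of
 * the local scif_fence_info; the peer echoes it back untouched so the
 * response handlers above can complete the correct request.
 */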
static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
{
        int err;
        struct scifmsg msg;
        struct scif_fence_info *fence_req;
        struct scif_endpt *ep = (struct scif_endpt *)epd;

        fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
        if (!fence_req) {
                err = -ENOMEM;
                goto error;
        }
        fence_req->state = OP_IN_PROGRESS;
        init_completion(&fence_req->comp);

        msg.src = ep->port;
        msg.uop = uop;
        msg.payload[0] = ep->remote_ep;
        msg.payload[1] = (u64)fence_req;
        if (uop == SCIF_WAIT)
                msg.payload[2] = mark;
        spin_lock(&ep->lock);
        if (ep->state == SCIFEP_CONNECTED)
                err = scif_nodeqp_send(ep->remote_dev, &msg);
        else
                err = -ENOTCONN;
        spin_unlock(&ep->lock);
        if (err)
                goto error_free;
retry:
        /* Wait for a SCIF_WAIT_(N)ACK message */
        err = wait_for_completion_timeout(&fence_req->comp,
                                          SCIF_NODE_ALIVE_TIMEOUT);
        if (!err && scifdev_alive(ep))
                goto retry;
        if (!err)
                err = -ENODEV;
        if (err > 0)
                err = 0;
        mutex_lock(&ep->rma_info.rma_lock);
        if (err < 0) {
                if (fence_req->state == OP_IN_PROGRESS)
                        fence_req->state = OP_FAILED;
        }
        if (fence_req->state == OP_FAILED && !err)
                err = -ENOMEM;
        if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
                *out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
        mutex_unlock(&ep->rma_info.rma_lock);
error_free:
        kfree(fence_req);
error:
        return err;
}

/**
 * scif_send_fence_mark:
 * @epd: end point descriptor.
 * @out_mark: Output DMA mark reported by peer.
 *
 * Send a remote fence mark request.
 */
static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
{
        return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
}

/**
 * scif_send_fence_wait:
 * @epd: end point descriptor.
 * @mark: DMA mark to wait for.
 *
 * Send a remote fence wait request.
 */
static int scif_send_fence_wait(scif_epd_t epd, int mark)
{
        return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
}

static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
                                        struct scif_fence_info *fence_req)
{
        int err;

retry:
        /* Wait for a SCIF_SIG_(N)ACK message */
        err = wait_for_completion_timeout(&fence_req->comp,
                                          SCIF_NODE_ALIVE_TIMEOUT);
        if (!err && scifdev_alive(ep))
                goto retry;
        if (!err)
                err = -ENODEV;
        if (err > 0)
                err = 0;
        if (err < 0) {
                mutex_lock(&ep->rma_info.rma_lock);
                if (fence_req->state == OP_IN_PROGRESS)
                        fence_req->state = OP_FAILED;
                mutex_unlock(&ep->rma_info.rma_lock);
        }
        if (fence_req->state == OP_FAILED && !err)
                err = -ENXIO;
        return err;
}

/**
 * scif_send_fence_signal:
 * @epd: endpoint descriptor
 * @roff: remote offset
 * @rval: remote value to write to @roff
 * @loff: local offset
 * @lval: local value to write to @loff
 * @flags: flags
 *
 * Sends a remote fence signal request
 */
static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
                                  off_t loff, u64 lval, int flags)
{
        int err = 0;
        struct scifmsg msg;
        struct scif_fence_info *fence_req;
        struct scif_endpt *ep = (struct scif_endpt *)epd;

        fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
        if (!fence_req) {
                err = -ENOMEM;
                goto error;
        }

        fence_req->state = OP_IN_PROGRESS;
        init_completion(&fence_req->comp);
        msg.src = ep->port;
        if (flags & SCIF_SIGNAL_LOCAL) {
                msg.uop = SCIF_SIG_LOCAL;
                msg.payload[0] = ep->remote_ep;
                msg.payload[1] = roff;
                msg.payload[2] = rval;
                msg.payload[3] = (u64)fence_req;
                spin_lock(&ep->lock);
                if (ep->state == SCIFEP_CONNECTED)
                        err = scif_nodeqp_send(ep->remote_dev, &msg);
                else
                        err = -ENOTCONN;
                spin_unlock(&ep->lock);
                if (err)
                        goto error_free;
                err = _scif_send_fence_signal_wait(ep, fence_req);
                if (err)
                        goto error_free;
        }
        fence_req->state = OP_IN_PROGRESS;
        if (flags & SCIF_SIGNAL_REMOTE) {
                msg.uop = SCIF_SIG_REMOTE;
                msg.payload[0] = ep->remote_ep;
                msg.payload[1] = loff;
                msg.payload[2] = lval;
                msg.payload[3] = (u64)fence_req;
                spin_lock(&ep->lock);
                if (ep->state == SCIFEP_CONNECTED)
                        err = scif_nodeqp_send(ep->remote_dev, &msg);
                else
                        err = -ENOTCONN;
                spin_unlock(&ep->lock);
                if (err)
                        goto error_free;
                err = _scif_send_fence_signal_wait(ep, fence_req);
        }
error_free:
        kfree(fence_req);
error:
        return err;
}

static void scif_fence_mark_cb(void *arg)
{
        struct scif_endpt *ep = (struct scif_endpt *)arg;

        wake_up_interruptible(&ep->rma_info.markwq);
        atomic_dec(&ep->rma_info.fence_refcount);
}

/*
 * _scif_fence_mark:
 *
 * @epd - endpoint descriptor
 * Set up a mark for this endpoint and return the value of the mark.
 */
int _scif_fence_mark(scif_epd_t epd, int *mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct dma_chan *chan = ep->rma_info.dma_chan;
        struct dma_device *ddev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        int err;

        tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = (int)cookie;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        dma_async_issue_pending(chan);
        tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        tx->callback = scif_fence_mark_cb;
        tx->callback_param = ep;
        *mark = cookie = tx->tx_submit(tx);
        if (dma_submit_error(cookie)) {
                err = (int)cookie;
                dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        atomic_inc(&ep->rma_info.fence_refcount);
        dma_async_issue_pending(chan);
        return 0;
}

#define SCIF_LOOPB_MAGIC_MARK 0xdead

int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        int err = 0;

        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
                ep, flags, *mark);
        err = scif_verify_epd(ep);
        if (err)
                return err;

        /* Invalid flags? */
        if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
                return -EINVAL;

        /* At least one of init self or peer RMA should be set */
        if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
                return -EINVAL;

        /* Exactly one of init self or peer RMA should be set but not both */
        if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
                return -EINVAL;

        /*
         * Management node loopback does not need to use DMA.
         * Return a valid mark to be symmetric.
         */
        if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
                *mark = SCIF_LOOPB_MAGIC_MARK;
                return 0;
        }

        if (flags & SCIF_FENCE_INIT_SELF)
                err = _scif_fence_mark(epd, mark);
        else
                err = scif_send_fence_mark(ep, mark);
        if (err)
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
                ep, flags, *mark, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_fence_mark);

int scif_fence_wait(scif_epd_t epd, int mark)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        int err = 0;

        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_wait: ep %p mark 0x%x\n",
                ep, mark);
        err = scif_verify_epd(ep);
        if (err)
                return err;
        /*
         * Management node loopback does not need to use DMA.
         * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK
         * so simply return success if the mark is valid.
         */
        if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
                if (mark == SCIF_LOOPB_MAGIC_MARK)
                        return 0;
                else
                        return -EINVAL;
        }
        if (mark & SCIF_REMOTE_FENCE)
                err = scif_send_fence_wait(epd, mark);
        else
                err = _scif_fence_wait(epd, mark);
        if (err < 0)
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_fence_wait);

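/*
 * Illustrative usage (a sketch, not part of the driver): a client that
 * wants to know when its outstanding RMAs on @epd have completed would
 * pair the two exported calls above:
 *
 *	int mark, err;
 *
 *	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
 *	if (!err)
 *		err = scif_fence_wait(epd, mark);
 *
 * With SCIF_FENCE_INIT_PEER the returned mark carries SCIF_REMOTE_FENCE,
 * so the subsequent wait is forwarded to the peer via
 * scif_send_fence_wait().
 */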
int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
                      off_t roff, u64 rval, int flags)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        int err = 0;

        dev_dbg(scif_info.mdev.this_device,
                "SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
                ep, loff, lval, roff, rval, flags);
        err = scif_verify_epd(ep);
        if (err)
                return err;

        /* Invalid flags? */
        if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
                        SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
                return -EINVAL;

        /* At least one of init self or peer RMA should be set */
        if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
                return -EINVAL;

        /* Exactly one of init self or peer RMA should be set but not both */
        if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
                return -EINVAL;

        /* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
        if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
                return -EINVAL;

        /* Only Dword aligned offsets allowed */
        if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
                return -EINVAL;

        /* Only Dword aligned offsets allowed */
        if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
                return -EINVAL;

        if (flags & SCIF_FENCE_INIT_PEER) {
                err = scif_send_fence_signal(epd, roff, rval, loff,
                                             lval, flags);
        } else {
                /* Local Signal in Local RAS */
                if (flags & SCIF_SIGNAL_LOCAL) {
                        err = scif_prog_signal(epd, loff, lval,
                                               SCIF_WINDOW_SELF);
                        if (err)
                                goto error_ret;
                }

                /* Signal in Remote RAS */
                if (flags & SCIF_SIGNAL_REMOTE)
                        err = scif_prog_signal(epd, roff,
                                               rval, SCIF_WINDOW_PEER);
        }
error_ret:
        if (err)
                dev_err(scif_info.mdev.this_device,
                        "%s %d err %d\n", __func__, __LINE__, err);
        return err;
}
EXPORT_SYMBOL_GPL(scif_fence_signal);

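/*
 * Illustrative usage (a sketch, not part of the driver): signal a u64
 * completion flag in both registered address spaces once the RMAs the
 * local endpoint initiated on @epd have finished. Offsets must be u32
 * aligned; 0x2000 is an arbitrary example offset, assumed to fall in
 * valid registered windows on both sides:
 *
 *	err = scif_fence_signal(epd, 0x2000, 1, 0x2000, 1,
 *				SCIF_FENCE_INIT_SELF |
 *				SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE);
 */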