/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "ipath_verbs.h"

/*
 * Global table mapping multicast GIDs to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
static DEFINE_SPINLOCK(mcast_lock);

/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
	struct ipath_mcast_qp *mqp;

	mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
	struct ipath_qp *qp = mqp->qp;

	/* Notify ipath_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}

/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
	struct ipath_mcast *mcast;

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);
	mcast->n_attached = 0;

bail:
	return mcast;
}

static void ipath_mcast_free(struct ipath_mcast *mcast)
{
	struct ipath_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		ipath_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct ipath_mcast *mcast;

	spin_lock_irqsave(&mcast_lock, flags);
	n = mcast_tree.rb_node;
	while (n) {
		int ret;

		mcast = rb_entry(n, struct ipath_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else {
			atomic_inc(&mcast->refcount);
			spin_unlock_irqrestore(&mcast_lock, flags);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&mcast_lock, flags);

	mcast = NULL;

bail:
	return mcast;
}

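/*
 * Illustrative sketch, not part of the driver: how a consumer honors the
 * reference-count contract above, and how a packet arriving on one device
 * can be delivered to QPs attached on any device, per the global-table
 * comment at the top of this file.  The deliver() callback is hypothetical;
 * the real consumer is the receive path in ipath_verbs.c.
 */
static void __maybe_unused example_mcast_deliver(union ib_gid *mgid,
					void (*deliver)(struct ipath_qp *qp))
{
	struct ipath_mcast *mcast;
	struct ipath_mcast_qp *p;

	mcast = ipath_mcast_find(mgid);		/* takes a reference */
	if (mcast == NULL)
		return;

	/* The QP list is RCU-managed; walkers are tracked via the refcount. */
	list_for_each_entry_rcu(p, &mcast->qp_list, list)
		deliver(p->qp);

	/* Drop our reference; wake ipath_multicast_detach() if it is waiting. */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);
}
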
/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the device on which the QP is being attached
 * @mcast: the mcast GID structure to insert if not already in the table
 * @mqp: the QP to attach
 *
 * Return zero if both were added. Return EEXIST if the GID was already in
 * the table but the QP was added. Return ESRCH if the QP was already
 * attached and neither structure was added. Return ENOMEM if an attach
 * limit was exceeded and neither structure was added.
 */
static int ipath_mcast_add(struct ipath_ibdev *dev,
			   struct ipath_mcast *mcast,
			   struct ipath_mcast_qp *mqp)
{
	struct rb_node **n = &mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&mcast_lock);

	while (*n) {
		struct ipath_mcast *tmcast;
		struct ipath_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&dev->n_mcast_grps_lock);
	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
		spin_unlock(&dev->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	dev->n_mcast_grps_allocated++;
	spin_unlock(&dev->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&mcast_lock);

	return ret;
}

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast;
	struct ipath_mcast_qp *mqp;
	int ret;

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = ipath_mcast_alloc(gid);
	if (mcast == NULL) {
		ret = -ENOMEM;
		goto bail;
	}
	mqp = ipath_mcast_qp_alloc(qp);
	if (mqp == NULL) {
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	}

	switch (ipath_mcast_add(dev, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: can't attach the same QP twice. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -EINVAL;
		goto bail;

	case EEXIST:		/* The mcast wasn't used */
		ipath_mcast_free(mcast);
		break;

	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;

	default:
		break;
	}

	ret = 0;

bail:
	return ret;
}

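/*
 * Illustrative sketch, an assumption about the caller rather than code in
 * this driver: consumers do not call ipath_multicast_attach() directly; the
 * core ib_attach_mcast()/ib_detach_mcast() verbs dispatch here through the
 * ib_device method table.  A kernel ULP joining and leaving a group with a
 * UD QP might look like this; the mgid and mlid would come from the subnet
 * administrator.
 */
static int __maybe_unused example_join_leave(struct ib_qp *qp,
					     union ib_gid *mgid, u16 mlid)
{
	int ret;

	ret = ib_attach_mcast(qp, mgid, mlid);	/* reaches this driver */
	if (ret)
		return ret;

	/* ... send to and receive from the group ... */

	return ib_detach_mcast(qp, mgid, mlid);
}
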
int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast = NULL;
	struct ipath_mcast_qp *p, *tmp;
	struct rb_node *n;
	int found = 0;
	int last = 0;
	int ret;

	spin_lock_irq(&mcast_lock);

	/* Find the GID in the mcast table. */
	n = mcast_tree.rb_node;
	while (1) {
		if (n == NULL) {
			spin_unlock_irq(&mcast_lock);
			ret = -EINVAL;
			goto bail;
		}

		mcast = rb_entry(n, struct ipath_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &mcast_tree);
			last = 1;
		}
		found = 1;
		break;
	}

	spin_unlock_irq(&mcast_lock);

	/*
	 * After list_for_each_entry_safe() the cursor is never NULL, so an
	 * explicit flag is tested rather than p itself.
	 */
	if (!found) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	ipath_mcast_qp_free(p);

	if (last) {
		/*
		 * Drop the reference held by the mcast tree and wait for
		 * any remaining walkers before freeing the group.
		 */
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		ipath_mcast_free(mcast);
		spin_lock_irq(&dev->n_mcast_grps_lock);
		dev->n_mcast_grps_allocated--;
		spin_unlock_irq(&dev->n_mcast_grps_lock);
	}

	ret = 0;

bail:
	return ret;
}

int ipath_mcast_tree_empty(void)
{
	return mcast_tree.rb_node == NULL;
}
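
/*
 * Usage sketch, assuming the caller in ipath_verbs.c: the device unregister
 * path uses this predicate to catch groups that were never detached, roughly:
 *
 *	if (!ipath_mcast_tree_empty())
 *		ipath_dev_err(dd, "multicast table memory leak\n");
 */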