/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device);

static struct ib_client mcast_client = {
        .name   = "ib_multicast",
        .add    = mcast_add_one,
        .remove = mcast_remove_one
};

static struct ib_sa_client sa_client;
static struct workqueue_struct *mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
        struct mcast_device *dev;
        spinlock_t          lock;
        struct rb_root      table;
        atomic_t            refcount;
        struct completion   comp;
        u8                  port_num;
};

struct mcast_device {
        struct ib_device        *device;
        struct ib_event_handler event_handler;
        int                     start_port;
        int                     end_port;
        struct mcast_port       port[0];
};

enum mcast_state {
        MCAST_JOINING,
        MCAST_MEMBER,
        MCAST_ERROR,
};

enum mcast_group_state {
        MCAST_IDLE,
        MCAST_BUSY,
        MCAST_GROUP_ERROR,
        MCAST_PKEY_EVENT
};

enum {
        MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
        struct ib_sa_mcmember_rec rec;
        struct rb_node      node;
        struct mcast_port   *port;
        spinlock_t          lock;
        struct work_struct  work;
        struct list_head    pending_list;
        struct list_head    active_list;
        struct mcast_member *last_join;
        int                 members[3];
        atomic_t            refcount;
        enum mcast_group_state state;
        struct ib_sa_query  *query;
        int                 query_id;
        u16                 pkey_index;
        u8                  leave_state;
        int                 retries;
};

struct mcast_member {
        struct ib_sa_multicast multicast;
        struct ib_sa_client *client;
        struct mcast_group  *group;
        struct list_head    list;
        enum mcast_state    state;
        atomic_t            refcount;
        struct completion   comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
                         void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
                          void *context);

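/*
 * Each port keeps its multicast groups in an rb-tree ordered by MGID
 * (raw byte comparison via memcmp), so the lookup and insertion below
 * are standard rb-tree walks keyed on group->rec.mgid.
 */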
static struct mcast_group *mcast_find(struct mcast_port *port,
                                      union ib_gid *mgid)
{
        struct rb_node *node = port->table.rb_node;
        struct mcast_group *group;
        int ret;

        while (node) {
                group = rb_entry(node, struct mcast_group, node);
                ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
                if (!ret)
                        return group;

                if (ret < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
                                        struct mcast_group *group,
                                        int allow_duplicates)
{
        struct rb_node **link = &port->table.rb_node;
        struct rb_node *parent = NULL;
        struct mcast_group *cur_group;
        int ret;

        while (*link) {
                parent = *link;
                cur_group = rb_entry(parent, struct mcast_group, node);

                ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
                             sizeof group->rec.mgid);
                if (ret < 0)
                        link = &(*link)->rb_left;
                else if (ret > 0)
                        link = &(*link)->rb_right;
                else if (allow_duplicates)
                        link = &(*link)->rb_left;
                else
                        return cur_group;
        }
        rb_link_node(&group->node, parent, link);
        rb_insert_color(&group->node, &port->table);
        return NULL;
}

static void deref_port(struct mcast_port *port)
{
        if (atomic_dec_and_test(&port->refcount))
                complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
        struct mcast_port *port = group->port;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        if (atomic_dec_and_test(&group->refcount)) {
                rb_erase(&group->node, &port->table);
                spin_unlock_irqrestore(&port->lock, flags);
                kfree(group);
                deref_port(port);
        } else
                spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
        if (atomic_dec_and_test(&member->refcount))
                complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
        struct mcast_group *group = member->group;
        unsigned long flags;

        spin_lock_irqsave(&group->lock, flags);
        list_add_tail(&member->list, &group->pending_list);
        if (group->state == MCAST_IDLE) {
                group->state = MCAST_BUSY;
                atomic_inc(&group->refcount);
                queue_work(mcast_wq, &group->work);
        }
        spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has three types of members: full member, non-member,
 * and send-only member.  We need to keep track of the number of members
 * of each type based on their join state.  Adjust the number of members
 * that belong to the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
        int i;

        for (i = 0; i < 3; i++, join_state >>= 1)
                if (join_state & 0x1)
                        group->members[i] += inc;
}

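/*
 * Worked example (the join_state bit layout follows the IBA MCMemberRecord
 * JoinState field: bit 0 = full member, bit 1 = non-member, bit 2 =
 * send-only non-member): adjust_membership(group, 0x5, 1) walks the low
 * three bits and increments members[0] and members[2], leaving members[1]
 * untouched.
 */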
/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
        u8 leave_state = 0;
        int i;

        for (i = 0; i < 3; i++)
                if (!group->members[i])
                        leave_state |= (0x1 << i);

        return leave_state & group->rec.join_state;
}

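/*
 * For example, if members[] is {0, 2, 0} the loop above builds 0x5, and
 * with rec.join_state == 0x7 the group must leave the full-member and
 * send-only states (0x5) while remaining joined as a non-member.
 */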
static int check_selector(ib_sa_comp_mask comp_mask,
                          ib_sa_comp_mask selector_mask,
                          ib_sa_comp_mask value_mask,
                          u8 selector, u8 src_value, u8 dst_value)
{
        int err;

        if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
                return 0;

        switch (selector) {
        case IB_SA_GT:
                err = (src_value <= dst_value);
                break;
        case IB_SA_LT:
                err = (src_value >= dst_value);
                break;
        case IB_SA_EQ:
                err = (src_value != dst_value);
                break;
        default:
                err = 0;
                break;
        }

        return err;
}

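/*
 * check_selector() returns 0 on a match and nonzero on a mismatch.  For
 * example, with selector IB_SA_GT, src_value 4 (the group's value) and
 * dst_value 2 (the requested value), it returns 0: the existing group
 * satisfies a "greater than 2" request.
 */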
static int cmp_rec(struct ib_sa_mcmember_rec *src,
                   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
        /* MGID must already match */
        if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
            memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
                return -EINVAL;
        if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
                           IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
                           src->mtu, dst->mtu))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
            src->traffic_class != dst->traffic_class)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
                return -EINVAL;
        if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
                           IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
                           src->rate, dst->rate))
                return -EINVAL;
        if (check_selector(comp_mask,
                           IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
                           IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
                           dst->packet_life_time_selector,
                           src->packet_life_time, dst->packet_life_time))
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
            src->flow_label != dst->flow_label)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
            src->hop_limit != dst->hop_limit)
                return -EINVAL;
        if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
                return -EINVAL;

        /* join_state checked separately, proxy_join ignored */
        return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
        struct mcast_port *port = group->port;
        int ret;

        group->last_join = member;
        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_MGMT_METHOD_SET,
                                       &member->multicast.rec,
                                       member->multicast.comp_mask,
                                       3000, GFP_KERNEL, join_handler, group,
                                       &group->query);
        if (ret >= 0) {
                group->query_id = ret;
                ret = 0;
        }
        return ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
        struct mcast_port *port = group->port;
        struct ib_sa_mcmember_rec rec;
        int ret;

        rec = group->rec;
        rec.join_state = leave_state;
        group->leave_state = leave_state;

        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_SA_METHOD_DELETE, &rec,
                                       IB_SA_MCMEMBER_REC_MGID |
                                       IB_SA_MCMEMBER_REC_PORT_GID |
                                       IB_SA_MCMEMBER_REC_JOIN_STATE,
                                       3000, GFP_KERNEL, leave_handler,
                                       group, &group->query);
        if (ret >= 0) {
                group->query_id = ret;
                ret = 0;
        }
        return ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
                       u8 join_state)
{
        member->state = MCAST_MEMBER;
        adjust_membership(group, join_state, 1);
        group->rec.join_state |= join_state;
        member->multicast.rec = group->rec;
        member->multicast.rec.join_state = join_state;
        list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
                     int status)
{
        spin_lock_irq(&group->lock);
        list_del_init(&member->list);
        spin_unlock_irq(&group->lock);
        return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
        struct mcast_member *member;
        int ret = 0;
        u16 pkey_index;

        if (group->state == MCAST_PKEY_EVENT)
                ret = ib_find_pkey(group->port->dev->device,
                                   group->port->port_num,
                                   be16_to_cpu(group->rec.pkey), &pkey_index);

        spin_lock_irq(&group->lock);
        if (group->state == MCAST_PKEY_EVENT && !ret &&
            group->pkey_index == pkey_index)
                goto out;

        while (!list_empty(&group->active_list)) {
                member = list_entry(group->active_list.next,
                                    struct mcast_member, list);
                atomic_inc(&member->refcount);
                list_del_init(&member->list);
                adjust_membership(group, member->multicast.rec.join_state, -1);
                member->state = MCAST_ERROR;
                spin_unlock_irq(&group->lock);

                ret = member->multicast.callback(-ENETRESET,
                                                 &member->multicast);
                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
                spin_lock_irq(&group->lock);
        }

        group->rec.join_state = 0;
out:
        group->state = MCAST_BUSY;
        spin_unlock_irq(&group->lock);
}

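/*
 * Group state machine worker: drain the pending list, either folding a
 * request into the existing SA membership (when its join_state bits are
 * already covered and its parameters match the group record) or issuing
 * a new SA join.  Once the pending list is empty, any join states left
 * without members are dropped via send_leave(); otherwise the group goes
 * back to MCAST_IDLE and its work reference is released.
 */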
static void mcast_work_handler(struct work_struct *work)
{
        struct mcast_group *group;
        struct mcast_member *member;
        struct ib_sa_multicast *multicast;
        int status, ret;
        u8 join_state;

        group = container_of(work, typeof(*group), work);
retest:
        spin_lock_irq(&group->lock);
        while (!list_empty(&group->pending_list) ||
               (group->state != MCAST_BUSY)) {

                if (group->state != MCAST_BUSY) {
                        spin_unlock_irq(&group->lock);
                        process_group_error(group);
                        goto retest;
                }

                member = list_entry(group->pending_list.next,
                                    struct mcast_member, list);
                multicast = &member->multicast;
                join_state = multicast->rec.join_state;
                atomic_inc(&member->refcount);

                if (join_state == (group->rec.join_state & join_state)) {
                        status = cmp_rec(&group->rec, &multicast->rec,
                                         multicast->comp_mask);
                        if (!status)
                                join_group(group, member, join_state);
                        else
                                list_del_init(&member->list);
                        spin_unlock_irq(&group->lock);
                        ret = multicast->callback(status, multicast);
                } else {
                        spin_unlock_irq(&group->lock);
                        status = send_join(group, member);
                        if (!status) {
                                deref_member(member);
                                return;
                        }
                        ret = fail_join(group, member, status);
                }

                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
                spin_lock_irq(&group->lock);
        }

        join_state = get_leave_state(group);
        if (join_state) {
                group->rec.join_state &= ~join_state;
                spin_unlock_irq(&group->lock);
                if (send_leave(group, join_state))
                        goto retest;
        } else {
                group->state = MCAST_IDLE;
                spin_unlock_irq(&group->lock);
                release_group(group);
        }
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
        struct mcast_member *member;
        int ret;

        spin_lock_irq(&group->lock);
        member = list_entry(group->pending_list.next,
                            struct mcast_member, list);
        if (group->last_join == member) {
                atomic_inc(&member->refcount);
                list_del_init(&member->list);
                spin_unlock_irq(&group->lock);
                ret = member->multicast.callback(status, &member->multicast);
                deref_member(member);
                if (ret)
                        ib_sa_free_multicast(&member->multicast);
        } else
                spin_unlock_irq(&group->lock);
}

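/*
 * SA join completion: on success, adopt the record returned by the SA as
 * the group's record, cache the pkey index, and re-insert the group in
 * the port's rb-tree when the MGID key changes (joins issued against the
 * zero MGID let the SA assign the real MGID).  The work handler is then
 * called directly to process any requests queued behind this join.
 */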
static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
                         void *context)
{
        struct mcast_group *group = context;
        u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

        if (status)
                process_join_error(group, status);
        else {
                ib_find_pkey(group->port->dev->device, group->port->port_num,
                             be16_to_cpu(rec->pkey), &pkey_index);

                spin_lock_irq(&group->port->lock);
                group->rec = *rec;
                if (group->state == MCAST_BUSY &&
                    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
                        group->pkey_index = pkey_index;
                if (!memcmp(&mgid0, &group->rec.mgid, sizeof mgid0)) {
                        rb_erase(&group->node, &group->port->table);
                        mcast_insert(group->port, group, 1);
                }
                spin_unlock_irq(&group->port->lock);
        }
        mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
                          void *context)
{
        struct mcast_group *group = context;

        if (status && group->retries > 0 &&
            !send_leave(group, group->leave_state))
                group->retries--;
        else
                mcast_work_handler(&group->work);
}

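/*
 * Find or create the group for an MGID, taking a reference on it.  Joins
 * against the zero MGID always allocate a new group (duplicates are
 * allowed in the rb-tree), since the SA will assign a unique MGID when
 * the join completes.
 */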
static struct mcast_group *acquire_group(struct mcast_port *port,
                                         union ib_gid *mgid, gfp_t gfp_mask)
{
        struct mcast_group *group, *cur_group;
        unsigned long flags;
        int is_mgid0;

        is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
        if (!is_mgid0) {
                spin_lock_irqsave(&port->lock, flags);
                group = mcast_find(port, mgid);
                if (group)
                        goto found;
                spin_unlock_irqrestore(&port->lock, flags);
        }

        group = kzalloc(sizeof *group, gfp_mask);
        if (!group)
                return NULL;

        group->retries = 3;
        group->port = port;
        group->rec.mgid = *mgid;
        group->pkey_index = MCAST_INVALID_PKEY_INDEX;
        INIT_LIST_HEAD(&group->pending_list);
        INIT_LIST_HEAD(&group->active_list);
        INIT_WORK(&group->work, mcast_work_handler);
        spin_lock_init(&group->lock);

        spin_lock_irqsave(&port->lock, flags);
        cur_group = mcast_insert(port, group, is_mgid0);
        if (cur_group) {
                kfree(group);
                group = cur_group;
        } else
                atomic_inc(&port->refcount);
found:
        atomic_inc(&group->refcount);
        spin_unlock_irqrestore(&port->lock, flags);
        return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
                     struct ib_device *device, u8 port_num,
                     struct ib_sa_mcmember_rec *rec,
                     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
                     int (*callback)(int status,
                                     struct ib_sa_multicast *multicast),
                     void *context)
{
        struct mcast_device *dev;
        struct mcast_member *member;
        struct ib_sa_multicast *multicast;
        int ret;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return ERR_PTR(-ENODEV);

        member = kmalloc(sizeof *member, gfp_mask);
        if (!member)
                return ERR_PTR(-ENOMEM);

        ib_sa_client_get(client);
        member->client = client;
        member->multicast.rec = *rec;
        member->multicast.comp_mask = comp_mask;
        member->multicast.callback = callback;
        member->multicast.context = context;
        init_completion(&member->comp);
        atomic_set(&member->refcount, 1);
        member->state = MCAST_JOINING;

        member->group = acquire_group(&dev->port[port_num - dev->start_port],
                                      &rec->mgid, gfp_mask);
        if (!member->group) {
                ret = -ENOMEM;
                goto err;
        }

        /*
         * The user will get the multicast structure in their callback.  They
         * could then free the multicast structure before we can return from
         * this routine.  So we save the pointer to return before queuing
         * any callback.
         */
        multicast = &member->multicast;
        queue_join(member);
        return multicast;

err:
        ib_sa_client_put(client);
        kfree(member);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);

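/*
 * Example usage of the join/free API (a minimal sketch, not from this
 * file; "my_sa_client", "my_rec", "my_done" and the callback body are
 * hypothetical):
 *
 *	static int my_done(int status, struct ib_sa_multicast *mc)
 *	{
 *		if (status)
 *			pr_err("multicast join failed: %d\n", status);
 *		// Returning nonzero here tells the core to free the
 *		// membership on our behalf; we keep it and free it later.
 *		return 0;
 *	}
 *
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &my_rec,
 *				  IB_SA_MCMEMBER_REC_MGID |
 *				  IB_SA_MCMEMBER_REC_PORT_GID |
 *				  IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				  GFP_KERNEL, my_done, my_ctx);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 *	...
 *	ib_sa_free_multicast(mc);	// SA leave sent when last member goes
 */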
void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
        struct mcast_member *member;
        struct mcast_group *group;

        member = container_of(multicast, struct mcast_member, multicast);
        group = member->group;

        spin_lock_irq(&group->lock);
        if (member->state == MCAST_MEMBER)
                adjust_membership(group, multicast->rec.join_state, -1);

        list_del_init(&member->list);

        if (group->state == MCAST_IDLE) {
                group->state = MCAST_BUSY;
                spin_unlock_irq(&group->lock);
                /* Continue to hold reference on group until callback */
                queue_work(mcast_wq, &group->work);
        } else {
                spin_unlock_irq(&group->lock);
                release_group(group);
        }

        deref_member(member);
        wait_for_completion(&member->comp);
        ib_sa_client_put(member->client);
        kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
                           union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        struct mcast_group *group;
        unsigned long flags;
        int ret = 0;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return -ENODEV;

        port = &dev->port[port_num - dev->start_port];
        spin_lock_irqsave(&port->lock, flags);
        group = mcast_find(port, mgid);
        if (group)
                *rec = group->rec;
        else
                ret = -EADDRNOTAVAIL;
        spin_unlock_irqrestore(&port->lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
                             struct ib_sa_mcmember_rec *rec,
                             struct ib_ah_attr *ah_attr)
{
        int ret;
        u16 gid_index;
        u8 p;

        ret = ib_find_cached_gid(device, &rec->port_gid, &p, &gid_index);
        if (ret)
                return ret;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = be16_to_cpu(rec->mlid);
        ah_attr->sl = rec->sl;
        ah_attr->port_num = port_num;
        ah_attr->static_rate = rec->rate;

        ah_attr->ah_flags = IB_AH_GRH;
        ah_attr->grh.dgid = rec->mgid;

        ah_attr->grh.sgid_index = (u8) gid_index;
        ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
        ah_attr->grh.hop_limit = rec->hop_limit;
        ah_attr->grh.traffic_class = rec->traffic_class;

        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);

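/*
 * Typical caller pattern (a sketch; "pd" and the joined record "mc->rec"
 * are assumed to come from the caller's join callback): build the address
 * handle used to post sends to the group.
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_init_ah_from_mcmember(device, port_num, &mc->rec, &ah_attr))
 *		ah = ib_create_ah(pd, &ah_attr);
 */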
static void mcast_groups_event(struct mcast_port *port,
                               enum mcast_group_state state)
{
        struct mcast_group *group;
        struct rb_node *node;
        unsigned long flags;

        spin_lock_irqsave(&port->lock, flags);
        for (node = rb_first(&port->table); node; node = rb_next(node)) {
                group = rb_entry(node, struct mcast_group, node);
                spin_lock(&group->lock);
                if (group->state == MCAST_IDLE) {
                        atomic_inc(&group->refcount);
                        queue_work(mcast_wq, &group->work);
                }
                if (group->state != MCAST_GROUP_ERROR)
                        group->state = state;
                spin_unlock(&group->lock);
        }
        spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
                                struct ib_event *event)
{
        struct mcast_device *dev;
        int index;

        dev = container_of(handler, struct mcast_device, event_handler);
        if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
            IB_LINK_LAYER_INFINIBAND)
                return;

        index = event->element.port_num - dev->start_port;

        switch (event->event) {
        case IB_EVENT_PORT_ERR:
        case IB_EVENT_LID_CHANGE:
        case IB_EVENT_SM_CHANGE:
        case IB_EVENT_CLIENT_REREGISTER:
                mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
                break;
        case IB_EVENT_PKEY_CHANGE:
                mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
                break;
        default:
                break;
        }
}

static void mcast_add_one(struct ib_device *device)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        int i;
        int count = 0;

        if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
                return;

        dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
                      GFP_KERNEL);
        if (!dev)
                return;

        if (device->node_type == RDMA_NODE_IB_SWITCH)
                dev->start_port = dev->end_port = 0;
        else {
                dev->start_port = 1;
                dev->end_port = device->phys_port_cnt;
        }

        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (rdma_port_get_link_layer(device, dev->start_port + i) !=
                    IB_LINK_LAYER_INFINIBAND)
                        continue;
                port = &dev->port[i];
                port->dev = dev;
                port->port_num = dev->start_port + i;
                spin_lock_init(&port->lock);
                port->table = RB_ROOT;
                init_completion(&port->comp);
                atomic_set(&port->refcount, 1);
                ++count;
        }

        if (!count) {
                kfree(dev);
                return;
        }

        dev->device = device;
        ib_set_client_data(device, &mcast_client, dev);

        INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
        ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device)
{
        struct mcast_device *dev;
        struct mcast_port *port;
        int i;

        dev = ib_get_client_data(device, &mcast_client);
        if (!dev)
                return;

        ib_unregister_event_handler(&dev->event_handler);
        flush_workqueue(mcast_wq);

        for (i = 0; i <= dev->end_port - dev->start_port; i++) {
                if (rdma_port_get_link_layer(device, dev->start_port + i) ==
                    IB_LINK_LAYER_INFINIBAND) {
                        port = &dev->port[i];
                        deref_port(port);
                        wait_for_completion(&port->comp);
                }
        }

        kfree(dev);
}

int mcast_init(void)
{
        int ret;

        mcast_wq = create_singlethread_workqueue("ib_mcast");
        if (!mcast_wq)
                return -ENOMEM;

        ib_sa_register_client(&sa_client);

        ret = ib_register_client(&mcast_client);
        if (ret)
                goto err;
        return 0;

err:
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(mcast_wq);
        return ret;
}

void mcast_cleanup(void)
{
        ib_unregister_client(&mcast_client);
        ib_sa_unregister_client(&sa_client);
        destroy_workqueue(mcast_wq);
}