bcast.c 15 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575
  1. /*
  2. * net/tipc/bcast.c: TIPC broadcast code
  3. *
  4. * Copyright (c) 2004-2006, 2014-2016, Ericsson AB
  5. * Copyright (c) 2004, Intel Corporation.
  6. * Copyright (c) 2005, 2010-2011, Wind River Systems
  7. * All rights reserved.
  8. *
  9. * Redistribution and use in source and binary forms, with or without
  10. * modification, are permitted provided that the following conditions are met:
  11. *
  12. * 1. Redistributions of source code must retain the above copyright
  13. * notice, this list of conditions and the following disclaimer.
  14. * 2. Redistributions in binary form must reproduce the above copyright
  15. * notice, this list of conditions and the following disclaimer in the
  16. * documentation and/or other materials provided with the distribution.
  17. * 3. Neither the names of the copyright holders nor the names of its
  18. * contributors may be used to endorse or promote products derived from
  19. * this software without specific prior written permission.
  20. *
  21. * Alternatively, this software may be distributed under the terms of the
  22. * GNU General Public License ("GPL") version 2 as published by the Free
  23. * Software Foundation.
  24. *
  25. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  26. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  27. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  28. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  29. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  30. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  31. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  32. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  33. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  34. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  35. * POSSIBILITY OF SUCH DAMAGE.
  36. */
  37. #include <linux/tipc_config.h>
  38. #include "socket.h"
  39. #include "msg.h"
  40. #include "bcast.h"
  41. #include "link.h"
  42. #include "name_table.h"
#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */

/* Well-known name under which the broadcast link appears, e.g. in
 * link statistics and netlink replies
 */
const char tipc_bclink_name[] = "broadcast-link";
/**
 * struct tipc_bc_base - base structure for keeping broadcast send state
 * @link: broadcast send link structure
 * @inputq: data input queue; will only carry SOCK_WAKEUP messages
 * @dests: array keeping number of reachable destinations per bearer
 * @primary_bearer: a bearer having links to all broadcast destinations, if any
 * @bcast_support: indicates if primary bearer, if any, supports broadcast
 * @rcast_support: indicates if all peer nodes support replicast
 * @rc_ratio: dest count as percentage of cluster size where send method changes
 * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct sk_buff_head inputq;
	int dests[MAX_BEARERS];
	int primary_bearer;
	bool bcast_support;
	bool rcast_support;
	int rc_ratio;
	int bc_threshold;
};
  67. static struct tipc_bc_base *tipc_bc_base(struct net *net)
  68. {
  69. return tipc_net(net)->bcbase;
  70. }
  71. int tipc_bcast_get_mtu(struct net *net)
  72. {
  73. return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
  74. }
  75. void tipc_bcast_disable_rcast(struct net *net)
  76. {
  77. tipc_bc_base(net)->rcast_support = false;
  78. }
  79. static void tipc_bcbase_calc_bc_threshold(struct net *net)
  80. {
  81. struct tipc_bc_base *bb = tipc_bc_base(net);
  82. int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
  83. bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
  84. }
/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
 * if any, and make it primary bearer
 *
 * Side effects: may shrink the broadcast link MTU to the smallest in-use
 * bearer MTU, and recomputes bb->bcast_support.
 * Caller must hold the broadcast lock (all callers in this file do).
 */
static void tipc_bcbase_select_primary(struct net *net)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	int all_dests = tipc_link_bc_peers(bb->link);
	int i, mtu, prim;

	bb->primary_bearer = INVALID_BEARER_ID;
	bb->bcast_support = true;

	if (!all_dests)
		return;

	for (i = 0; i < MAX_BEARERS; i++) {
		if (!bb->dests[i])
			continue;

		/* Keep bcast link MTU no larger than any bearer in use */
		mtu = tipc_bearer_mtu(net, i);
		if (mtu < tipc_link_mtu(bb->link))
			tipc_link_set_mtu(bb->link, mtu);

		/* bcast_support only stays true if every used bearer has it */
		bb->bcast_support &= tipc_bearer_bcast_support(net, i);

		/* Only a bearer reaching all destinations can be primary */
		if (bb->dests[i] < all_dests)
			continue;

		bb->primary_bearer = i;

		/* Reduce risk that all nodes select same primary */
		if ((i ^ tipc_own_addr(net)) & 1)
			break;
	}

	/* With a primary selected, only its own bcast capability matters */
	prim = bb->primary_bearer;
	if (prim != INVALID_BEARER_ID)
		bb->bcast_support = tipc_bearer_bcast_support(net, prim);
}
  115. void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
  116. {
  117. struct tipc_bc_base *bb = tipc_bc_base(net);
  118. tipc_bcast_lock(net);
  119. bb->dests[bearer_id]++;
  120. tipc_bcbase_select_primary(net);
  121. tipc_bcast_unlock(net);
  122. }
  123. void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id)
  124. {
  125. struct tipc_bc_base *bb = tipc_bc_base(net);
  126. tipc_bcast_lock(net);
  127. bb->dests[bearer_id]--;
  128. tipc_bcbase_select_primary(net);
  129. tipc_bcast_unlock(net);
  130. }
/* tipc_bcbase_xmit - broadcast a packet queue across one or more bearers
 *
 * Consumes the buffers in @xmitq.
 *
 * Note that number of reachable destinations, as indicated in the dests[]
 * array, may transitionally differ from the number of destinations indicated
 * in each sent buffer. We can sustain this. Excess destination nodes will
 * drop and never acknowledge the unexpected packets, and missing destinations
 * will either require retransmission (if they are just about to be added to
 * the bearer), or be removed from the buffer's 'ackers' counter (if they
 * just went down)
 */
static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	int bearer_id;
	struct tipc_bc_base *bb = tipc_bc_base(net);
	struct sk_buff *skb, *_skb;
	struct sk_buff_head _xmitq;

	if (skb_queue_empty(xmitq))
		return;

	/* The typical case: at least one bearer has links to all nodes */
	bearer_id = bb->primary_bearer;
	if (bearer_id >= 0) {
		tipc_bearer_bc_xmit(net, bearer_id, xmitq);
		return;
	}

	/* We have to transmit across all bearers */
	skb_queue_head_init(&_xmitq);
	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		if (!bb->dests[bearer_id])
			continue;

		/* Each bearer gets its own copies; the originals stay in
		 * xmitq until purged below
		 */
		skb_queue_walk(xmitq, skb) {
			_skb = pskb_copy_for_clone(skb, GFP_ATOMIC);
			if (!_skb)
				break;	/* out of memory: send what we have */
			__skb_queue_tail(&_xmitq, _skb);
		}
		tipc_bearer_bc_xmit(net, bearer_id, &_xmitq);
	}
	/* Release the originals, plus any copies left behind */
	__skb_queue_purge(xmitq);
	__skb_queue_purge(&_xmitq);
}
/* tipc_bcast_select_xmit_method - decide between replicast and broadcast
 * @net: the applicable net namespace
 * @dests: number of remote destinations for the pending message
 * @method: per-sender method state; method->rcast is updated in place
 *
 * The chosen method is sticky for TIPC_METHOD_EXPIRE jiffies, and is never
 * changed while method->mandatory is set.
 */
static void tipc_bcast_select_xmit_method(struct net *net, int dests,
					  struct tipc_mc_method *method)
{
	struct tipc_bc_base *bb = tipc_bc_base(net);
	unsigned long exp = method->expires;

	/* Broadcast supported by used bearer/bearers? */
	if (!bb->bcast_support) {
		method->rcast = true;
		return;
	}
	/* Any destinations which don't support replicast ? */
	if (!bb->rcast_support) {
		method->rcast = false;
		return;
	}
	/* Can current method be changed ? */
	method->expires = jiffies + TIPC_METHOD_EXPIRE;
	if (method->mandatory || time_before(jiffies, exp))
		return;
	/* Determine method to use now */
	method->rcast = dests <= bb->bc_threshold;
}
/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
 */
static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   u16 *cong_link_cnt)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq;
	int rc = 0;

	skb_queue_head_init(&xmitq);
	tipc_bcast_lock(net);
	/* No point queuing anything if there are no peers on the link */
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, pkts, &xmitq);
	tipc_bcast_unlock(net);

	/* Push whatever the link released, outside the lock */
	tipc_bcbase_xmit(net, &xmitq);
	__skb_queue_purge(pkts);

	/* Congestion is reported via the counter, not as an error */
	if (rc == -ELINKCONG) {
		*cong_link_cnt = 1;
		rc = 0;
	}
	return rc;
}
/* tipc_rcast_xmit - replicate and send a message to given destination nodes
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @dests: list of destination nodes
 * @cong_link_cnt: returns number of congested links
 * Returns 0 if success, otherwise errno
 *
 * @pkts itself is not consumed here; one private copy is made and sent
 * per destination node. Caller remains responsible for @pkts.
 */
static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
			   struct tipc_nlist *dests, u16 *cong_link_cnt)
{
	struct sk_buff_head _pkts;
	struct u32_item *n, *tmp;
	u32 dst, selector;

	/* Same selector for every copy, so all go out on the same link */
	selector = msg_link_selector(buf_msg(skb_peek(pkts)));
	skb_queue_head_init(&_pkts);

	list_for_each_entry_safe(n, tmp, &dests->list, list) {
		dst = n->value;
		if (!tipc_msg_pskb_copy(dst, pkts, &_pkts))
			return -ENOMEM;

		/* Any other return value than -ELINKCONG is ignored */
		if (tipc_node_xmit(net, &_pkts, dst, selector) == -ELINKCONG)
			(*cong_link_cnt)++;
	}
	return 0;
}
/* tipc_mcast_xmit - deliver message to indicated destination nodes
 * and to identified node local sockets
 * @net: the applicable net namespace
 * @pkts: chain of buffers containing message
 * @method: send method to be used
 * @dests: destination nodes for message.
 * @cong_link_cnt: returns number of encountered congested destination links
 * Consumes buffer chain.
 * Returns 0 if success, otherwise errno
 */
int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
		    struct tipc_mc_method *method, struct tipc_nlist *dests,
		    u16 *cong_link_cnt)
{
	struct sk_buff_head inputq, localq;
	int rc = 0;

	skb_queue_head_init(&inputq);
	skb_queue_head_init(&localq);

	/* Clone packets before they are consumed by next call */
	if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
		rc = -ENOMEM;
		goto exit;
	}

	/* Send according to determined transmit method */
	if (dests->remote) {
		tipc_bcast_select_xmit_method(net, dests->remote, method);
		if (method->rcast)
			rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
		else
			rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
	}

	/* Deliver the reassembled copy to sockets on this node */
	if (dests->local)
		tipc_sk_mcast_rcv(net, &localq, &inputq);
exit:
	/* This queue should normally be empty by now */
	__skb_queue_purge(pkts);
	return rc;
}
/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 * @net: the applicable net namespace
 * @l: the broadcast receive link for the sending peer
 * @skb: the received buffer; ownership is taken in all cases
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	/* Drop packets from other clusters, or while the link is down */
	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	/* Retransmissions etc. requested by the link, sent outside the lock */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}
/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 * @net: the applicable net namespace
 * @l: the broadcast receive link for the acknowledging peer
 * @hdr: message header carrying the acked sequence number
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	u16 acked = msg_bcast_ack(hdr);
	struct sk_buff_head xmitq;

	/* Ignore bc acks sent by peer before bcast synch point was received */
	if (msg_bc_ack_invalid(hdr))
		return;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, &xmitq);
	tipc_bcast_unlock(net);

	/* Send whatever the ack released, outside the lock */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 * @net: the applicable net namespace
 * @l: the broadcast receive link for the peer
 * @hdr: unicast header carrying the peer's broadcast send state
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) != STATE_MSG) {
		/* Initial synch point: initialize the receive link state */
		tipc_link_bc_init_rcv(l, hdr);
	} else if (!msg_bc_ack_invalid(hdr)) {
		/* Regular state message: process ack, then sync send state */
		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
		rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	}
	tipc_bcast_unlock(net);

	/* Send whatever the link queued, outside the lock */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);

	return rc;
}
  357. /* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
  358. *
  359. * RCU is locked, node lock is set
  360. */
  361. void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
  362. struct sk_buff_head *xmitq)
  363. {
  364. struct tipc_link *snd_l = tipc_bc_sndlink(net);
  365. tipc_bcast_lock(net);
  366. tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
  367. tipc_bcbase_select_primary(net);
  368. tipc_bcbase_calc_bc_threshold(net);
  369. tipc_bcast_unlock(net);
  370. }
/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 * @net: the applicable net namespace
 * @rcv_l: the peer's broadcast receive link
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
{
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	/* Primary bearer and method threshold depend on the peer set */
	tipc_bcbase_select_primary(net);
	tipc_bcbase_calc_bc_threshold(net);
	tipc_bcast_unlock(net);

	/* Send whatever the removal released, outside the lock */
	tipc_bcbase_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
  391. int tipc_bclink_reset_stats(struct net *net)
  392. {
  393. struct tipc_link *l = tipc_bc_sndlink(net);
  394. if (!l)
  395. return -ENOPROTOOPT;
  396. tipc_bcast_lock(net);
  397. tipc_link_reset_stats(l);
  398. tipc_bcast_unlock(net);
  399. return 0;
  400. }
  401. static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
  402. {
  403. struct tipc_link *l = tipc_bc_sndlink(net);
  404. if (!l)
  405. return -ENOPROTOOPT;
  406. if (limit < BCLINK_WIN_MIN)
  407. limit = BCLINK_WIN_MIN;
  408. if (limit > TIPC_MAX_LINK_WIN)
  409. return -EINVAL;
  410. tipc_bcast_lock(net);
  411. tipc_link_set_queue_limits(l, limit);
  412. tipc_bcast_unlock(net);
  413. return 0;
  414. }
  415. int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
  416. {
  417. int err;
  418. u32 win;
  419. struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
  420. if (!attrs[TIPC_NLA_LINK_PROP])
  421. return -EINVAL;
  422. err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
  423. if (err)
  424. return err;
  425. if (!props[TIPC_NLA_PROP_WIN])
  426. return -EOPNOTSUPP;
  427. win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
  428. return tipc_bc_link_set_queue_limits(net, win);
  429. }
/* tipc_bcast_init - create the per-namespace broadcast base and send link
 * Returns 0 on success, -ENOMEM on allocation or link creation failure
 */
int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bc_base *bb = NULL;
	struct tipc_link *l = NULL;

	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
	if (!bb)
		goto enomem;
	tn->bcbase = bb;
	spin_lock_init(&tipc_net(net)->bclock);

	if (!tipc_link_bc_create(net, 0, 0,
				 U16_MAX,
				 BCLINK_WIN_DEFAULT,
				 0,
				 &bb->inputq,
				 NULL,
				 NULL,
				 &l))
		goto enomem;
	bb->link = l;
	tn->bcl = l;
	bb->rc_ratio = 25;		/* default: replicast up to 25% of cluster */
	bb->rcast_support = true;
	return 0;
enomem:
	/* l is still NULL if link creation failed, so kfree(l) is a no-op;
	 * NOTE(review): assumes tipc_link_bc_create() leaves *l untouched
	 * on failure — confirm against link.c
	 */
	kfree(bb);
	kfree(l);
	return -ENOMEM;
}
  459. void tipc_bcast_stop(struct net *net)
  460. {
  461. struct tipc_net *tn = net_generic(net, tipc_net_id);
  462. synchronize_net();
  463. kfree(tn->bcbase);
  464. kfree(tn->bcl);
  465. }
  466. void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
  467. {
  468. memset(nl, 0, sizeof(*nl));
  469. INIT_LIST_HEAD(&nl->list);
  470. nl->self = self;
  471. }
  472. void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
  473. {
  474. if (node == nl->self)
  475. nl->local = true;
  476. else if (u32_push(&nl->list, node))
  477. nl->remote++;
  478. }
  479. void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
  480. {
  481. if (node == nl->self)
  482. nl->local = false;
  483. else if (u32_del(&nl->list, node))
  484. nl->remote--;
  485. }
  486. void tipc_nlist_purge(struct tipc_nlist *nl)
  487. {
  488. u32_list_purge(&nl->list);
  489. nl->remote = 0;
  490. nl->local = 0;
  491. }