
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
LIST_HEAD(rxrpc_connection_proc_list);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

static void rxrpc_destroy_connection(struct rcu_head *);

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		spin_lock_init(&conn->channel_lock);
		INIT_LIST_HEAD(&conn->waiting_calls);
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		skb_queue_head_init(&conn->rx_queue);
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->size_align = 4;
		conn->idle_timestamp = jiffies;
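		/* Note: the usage count is left at zero here; it is assumed
		 * that the caller sets the initial ref when it installs the
		 * connection.
		 */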
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is taken.
 * NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
						   struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sockaddr_rxrpc srx;
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	if (rxrpc_extract_addr_from_skb(&srx, skb) < 0)
		goto not_found;

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;

	/* We may have to handle mixing IPv4 and IPv6 */
	if (srx.transport.family != local->srx.transport.family) {
		pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
				    srx.transport.family,
				    local->srx.transport.family);
		goto not_found;
	}
	if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
		/* We need to look up service connections by the full protocol
		 * parameter set.  We look up the peer first as an intermediate
		 * step and then the connection from the peer's tree.
		 */
		peer = rxrpc_lookup_peer_rcu(local, &srx);
		if (!peer)
			goto not_found;

		conn = rxrpc_find_service_conn_rcu(peer, skb);
		if (!conn || atomic_read(&conn->usage) == 0)
			goto not_found;

		_leave(" = %p", conn);
		return conn;
	} else {
		/* Look up client connections by connection ID alone as their
		 * IDs are unique for this machine.
		 */
		conn = idr_find(&rxrpc_client_conn_ids,
				sp->hdr.cid >> RXRPC_CIDSHIFT);
		if (!conn || atomic_read(&conn->usage) == 0) {
			_debug("no conn");
			goto not_found;
		}
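		/* Check that the connection matches the epoch of the packet
		 * and was bound to the local endpoint the packet arrived on.
		 */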
		if (conn->proto.epoch != k.epoch ||
		    conn->params.local != local)
			goto not_found;

		peer = conn->params.peer;
		switch (srx.transport.family) {
		case AF_INET:
			if (peer->srx.transport.sin.sin_port !=
			    srx.transport.sin.sin_port ||
			    peer->srx.transport.sin.sin_addr.s_addr !=
			    srx.transport.sin.sin_addr.s_addr)
				goto not_found;
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			if (peer->srx.transport.sin6.sin6_port !=
			    srx.transport.sin6.sin6_port ||
			    memcmp(&peer->srx.transport.sin6.sin6_addr,
				   &srx.transport.sin6.sin6_addr,
				   sizeof(struct in6_addr)) != 0)
				goto not_found;
			break;
#endif
		default:
			BUG();
		}

		_leave(" = %p", conn);
		return conn;
	}

not_found:
	_leave(" = NULL");
	return NULL;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if
		 * necessary through the channel, whilst disposing of the
		 * actual call record.
		 */
		chan->last_service_id = call->service_id;
		if (call->abort_code) {
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
		} else {
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
		}
		/* Sync with rxrpc_conn_retransmit(). */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);
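	/* Client calls are disconnected through the client connection
	 * machinery instead.
	 */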
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	conn->idle_timestamp = jiffies;
	rxrpc_put_connection(conn);
}

/*
 * Kill off a connection.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
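	/* There should be no calls still attached to any channel, and the
	 * connection shouldn't be on the client conn cache list.
	 */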
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	write_lock(&rxrpc_connection_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxrpc_connection_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}

/*
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
 */
bool rxrpc_queue_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
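	/* Only take a ref if the usage count hasn't already reached zero;
	 * otherwise the connection is being killed and mustn't be requeued.
	 */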
	int n = __atomic_add_unless(&conn->usage, 1, 0);

	if (n == 0)
		return false;
	if (rxrpc_queue_work(&conn->processor))
		trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
	else
		rxrpc_put_connection(conn);
	return true;
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = atomic_read(&conn->usage);

		trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
	}
}

/*
 * Get a ref on a connection.
 */
void rxrpc_get_connection(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&conn->usage);

	trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);

	if (conn) {
		int n = __atomic_add_unless(&conn->usage, 1, 0);

		if (n > 0)
			trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Release a service connection
 */
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_dec_return(&conn->usage);
	trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
	ASSERTCMP(n, >=, 0);
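	/* Schedule the reaper to run immediately once the usage count has
	 * dropped to zero.
	 */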
	if (n == 0)
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	rxrpc_purge_queue(&conn->rx_queue);

	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}

/*
 * reap dead service connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long reap_older_than, earliest, idle_timestamp, now;
	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	reap_older_than = now - rxrpc_connection_expiry * HZ;
	earliest = ULONG_MAX;

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		ASSERTCMP(atomic_read(&conn->usage), >, 0);
		if (likely(atomic_read(&conn->usage) > 1))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		idle_timestamp = READ_ONCE(conn->idle_timestamp);
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long)reap_older_than - (long)idle_timestamp);
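		/* Not yet expired; note the earliest expiry time so that the
		 * reaper can be rescheduled for it.
		 */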
		if (time_after(idle_timestamp, reap_older_than)) {
			if (time_before(idle_timestamp, earliest))
				earliest = idle_timestamp;
			continue;
		}

		/* The usage count sits at 1 whilst the object is unused on the
		 * list; we reduce that to 0 to make the object unavailable.
		 */
		if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
			continue;

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxrpc_connection_lock);

	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long)earliest - now);
		ASSERT(time_after(earliest, now));
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 earliest - now);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_kill_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_destroy_all_client_connections();
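	/* Zero the expiry time so that the reaper treats every idle service
	 * connection as already expired, then run it once synchronously.
	 */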
	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxrpc_connection_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxrpc_connection_proc_list));

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();
	rxrpc_destroy_client_conn_ids();

	_leave("");
}