xpc_channel.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) channel support.
 *
 *	This is the part of XPC that manages the channels and
 *	sends/receives messages across them to/from other partitions.
 *
 */

#include <linux/device.h>
#include "xpc.h"
/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_arch_ops.setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
		else
			ch->flags |= XPC_C_SETUP;

		if (ch->flags & XPC_C_DISCONNECTING)
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
		ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
		xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENCOMPLETE))
		return;

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */
}
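
/*
 * For reference, a sketch of the open handshake that xpc_process_connect()
 * drives, derived from the flag updates above; the remote partition mirrors
 * each step from its side:
 *
 *	send OPENREQUEST	->  XPC_C_OPENREQUEST set
 *	recv OPENREQUEST	->  XPC_C_ROPENREQUEST set
 *	send OPENREPLY		->  XPC_C_OPENREPLY set
 *	recv OPENREPLY		->  XPC_C_ROPENREPLY set
 *	send OPENCOMPLETE	->  XPC_C_OPENCOMPLETE | XPC_C_CONNECTED set
 *	recv OPENCOMPLETE	->  XPC_C_ROPENCOMPLETE set, handshake done
 */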
/*
 * spin_lock_irqsave() is expected to be held on entry.
 */
static void
xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_DISCONNECTING))
		return;

	DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

	/* make sure all activity has settled down first */

	if (atomic_read(&ch->kthreads_assigned) > 0 ||
	    atomic_read(&ch->references) > 0) {
		return;
	}
	DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		!(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE));

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		/* can't proceed until the other side disengages from us */
		if (xpc_arch_ops.partition_engaged(ch->partid))
			return;

	} else {

		/* as long as the other side is up do the full protocol */

		if (!(ch->flags & XPC_C_RCLOSEREQUEST))
			return;

		if (!(ch->flags & XPC_C_CLOSEREPLY)) {
			ch->flags |= XPC_C_CLOSEREPLY;
			xpc_arch_ops.send_chctl_closereply(ch, irq_flags);
		}

		if (!(ch->flags & XPC_C_RCLOSEREPLY))
			return;
	}

	/* wake those waiting for notify completion */
	if (atomic_read(&ch->n_to_notify) > 0) {
		/* we do callout while holding ch->lock, callout can't block */
		xpc_arch_ops.notify_senders_of_disconnect(ch);
	}

	/* both sides are disconnected now */

	if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		xpc_disconnect_callout(ch, xpDisconnected);
		spin_lock_irqsave(&ch->lock, *irq_flags);
	}

	DBUG_ON(atomic_read(&ch->n_to_notify) != 0);

	/* it's now safe to free the channel's message queues */
	xpc_arch_ops.teardown_msg_structures(ch);

	ch->func = NULL;
	ch->key = NULL;
	ch->entry_size = 0;
	ch->local_nentries = 0;
	ch->remote_nentries = 0;
	ch->kthreads_assigned_limit = 0;
	ch->kthreads_idle_limit = 0;

	/*
	 * Mark the channel disconnected and clear all other flags, including
	 * XPC_C_SETUP (because of call to
	 * xpc_arch_ops.teardown_msg_structures()) but not including
	 * XPC_C_WDISCONNECT (if it was set).
	 */
	ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT));

	atomic_dec(&part->nchannels_active);

	if (channel_was_connected) {
		dev_info(xpc_chan, "channel %d to partition %d disconnected, "
			 "reason=%d\n", ch->number, ch->partid, ch->reason);
	}

	if (ch->flags & XPC_C_WDISCONNECT) {
		/* we won't lose the CPU since we're holding ch->lock */
		complete(&ch->wdisconnect_wait);
	} else if (ch->delayed_chctl_flags) {
		if (part->act_state != XPC_P_AS_DEACTIVATING) {
			/* time to take action on any delayed chctl flags */
			spin_lock(&part->chctl_lock);
			part->chctl.flags[ch->number] |=
			    ch->delayed_chctl_flags;
			spin_unlock(&part->chctl_lock);
		}
		ch->delayed_chctl_flags = 0;
	}
}
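
/*
 * For reference, a sketch of the full close protocol as driven by
 * xpc_disconnect_channel() and xpc_process_disconnect(), derived from the
 * flag updates above (the short path taken while the partition is
 * deactivating skips the remote replies):
 *
 *	send CLOSEREQUEST	->  XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING
 *	recv CLOSEREQUEST	->  XPC_C_RCLOSEREQUEST set
 *	send CLOSEREPLY		->  XPC_C_CLOSEREPLY set
 *	recv CLOSEREPLY		->  XPC_C_RCLOSEREPLY set
 *	teardown complete	->  XPC_C_DISCONNECTED set
 */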
/*
 * Process a change in the channel's remote connection state.
 */
static void
xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number,
				  u8 chctl_flags)
{
	unsigned long irq_flags;
	struct xpc_openclose_args *args =
	    &part->remote_openclose_args[ch_number];
	struct xpc_channel *ch = &part->channels[ch_number];
	enum xp_retval reason;
	enum xp_retval ret;
	int create_kthread = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);

again:

	if ((ch->flags & XPC_C_DISCONNECTED) &&
	    (ch->flags & XPC_C_WDISCONNECT)) {
		/*
		 * Delay processing chctl flags until thread waiting disconnect
		 * has had a chance to see that the channel is disconnected.
		 */
		ch->delayed_chctl_flags |= chctl_flags;
		goto out;
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREQUEST (reason=%d) received "
			"from partid=%d, channel=%d\n", args->reason,
			ch->partid, ch->number);

		/*
		 * If RCLOSEREQUEST is set, we're probably waiting for
		 * RCLOSEREPLY. We should find it and a ROPENREQUEST packed
		 * with this RCLOSEREQUEST in the chctl_flags.
		 */

		if (ch->flags & XPC_C_RCLOSEREQUEST) {
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));
			DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY));
			DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY);

			DBUG_ON(!(chctl_flags & XPC_CHCTL_CLOSEREPLY));
			chctl_flags &= ~XPC_CHCTL_CLOSEREPLY;
			ch->flags |= XPC_C_RCLOSEREPLY;

			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
			DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
			goto again;
		}

		if (ch->flags & XPC_C_DISCONNECTED) {
			if (!(chctl_flags & XPC_CHCTL_OPENREQUEST)) {
				if (part->chctl.flags[ch_number] &
				    XPC_CHCTL_OPENREQUEST) {

					DBUG_ON(ch->delayed_chctl_flags != 0);
					spin_lock(&part->chctl_lock);
					part->chctl.flags[ch_number] |=
					    XPC_CHCTL_CLOSEREQUEST;
					spin_unlock(&part->chctl_lock);
				}
				goto out;
			}

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
			ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST);
		}

		chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY |
		    XPC_CHCTL_OPENCOMPLETE);

		/*
		 * The meaningful CLOSEREQUEST connection state fields are:
		 *      reason = reason connection is to be closed
		 */

		ch->flags |= XPC_C_RCLOSEREQUEST;

		if (!(ch->flags & XPC_C_DISCONNECTING)) {
			reason = args->reason;
			if (reason <= xpSuccess || reason > xpUnknownReason)
				reason = xpUnknownReason;
			else if (reason == xpUnregistering)
				reason = xpOtherUnregistering;

			XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

			DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY);
			goto out;
		}

		xpc_process_disconnect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_CLOSEREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_CLOSEREPLY received from partid="
			"%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & XPC_C_DISCONNECTED) {
			DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST));

		if (!(ch->flags & XPC_C_RCLOSEREQUEST)) {
			if (part->chctl.flags[ch_number] &
			    XPC_CHCTL_CLOSEREQUEST) {

				DBUG_ON(ch->delayed_chctl_flags != 0);
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch_number] |=
				    XPC_CHCTL_CLOSEREPLY;
				spin_unlock(&part->chctl_lock);
			}
			goto out;
		}

		ch->flags |= XPC_C_RCLOSEREPLY;

		if (ch->flags & XPC_C_CLOSEREPLY) {
			/* both sides have finished disconnecting */
			xpc_process_disconnect(ch, &irq_flags);
		}
	}

	if (chctl_flags & XPC_CHCTL_OPENREQUEST) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, "
			"local_nentries=%d) received from partid=%d, "
			"channel=%d\n", args->entry_size, args->local_nentries,
			ch->partid, ch->number);

		if (part->act_state == XPC_P_AS_DEACTIVATING ||
		    (ch->flags & XPC_C_ROPENREQUEST)) {
			goto out;
		}

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) {
			ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST;
			goto out;
		}
		DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED |
				       XPC_C_OPENREQUEST)));
		DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
				     XPC_C_OPENREPLY | XPC_C_CONNECTED));

		/*
		 * The meaningful OPENREQUEST connection state fields are:
		 *      entry_size = size of channel's messages in bytes
		 *      local_nentries = remote partition's local_nentries
		 */
		if (args->entry_size == 0 || args->local_nentries == 0) {
			/* assume OPENREQUEST was delayed by mistake */
			goto out;
		}

		ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING);
		ch->remote_nentries = args->local_nentries;

		if (ch->flags & XPC_C_OPENREQUEST) {
			if (args->entry_size != ch->entry_size) {
				XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
						       &irq_flags);
				goto out;
			}
		} else {
			ch->entry_size = args->entry_size;

			XPC_SET_REASON(ch, 0, 0);
			ch->flags &= ~XPC_C_DISCONNECTED;

			atomic_inc(&part->nchannels_active);
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENREPLY) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
			"0x%lx, local_nentries=%d, remote_nentries=%d) "
			"received from partid=%d, channel=%d\n",
			args->local_msgqueue_pa, args->local_nentries,
			args->remote_nentries, ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(ch->flags & XPC_C_CONNECTED);

		/*
		 * The meaningful OPENREPLY connection state fields are:
		 *      local_msgqueue_pa = physical address of remote
		 *                          partition's local_msgqueue
		 *      local_nentries = remote partition's local_nentries
		 *      remote_nentries = remote partition's remote_nentries
		 */
		DBUG_ON(args->local_msgqueue_pa == 0);
		DBUG_ON(args->local_nentries == 0);
		DBUG_ON(args->remote_nentries == 0);

		ret = xpc_arch_ops.save_remote_msgqueue_pa(ch,
						      args->local_msgqueue_pa);
		if (ret != xpSuccess) {
			XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags);
			goto out;
		}
		ch->flags |= XPC_C_ROPENREPLY;

		if (args->local_nentries < ch->remote_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"remote_nentries=%d, old remote_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->local_nentries, ch->remote_nentries,
				ch->partid, ch->number);

			ch->remote_nentries = args->local_nentries;
		}
		if (args->remote_nentries < ch->local_nentries) {
			dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new "
				"local_nentries=%d, old local_nentries=%d, "
				"partid=%d, channel=%d\n",
				args->remote_nentries, ch->local_nentries,
				ch->partid, ch->number);

			ch->local_nentries = args->remote_nentries;
		}

		xpc_process_connect(ch, &irq_flags);
	}

	if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) {

		dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
			goto out;

		if (!(ch->flags & XPC_C_OPENREQUEST) ||
		    !(ch->flags & XPC_C_OPENREPLY)) {
			XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError,
					       &irq_flags);
			goto out;
		}

		DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST));
		DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY));
		DBUG_ON(!(ch->flags & XPC_C_CONNECTED));

		ch->flags |= XPC_C_ROPENCOMPLETE;

		xpc_process_connect(ch, &irq_flags);
		create_kthread = 1;
	}

out:
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (create_kthread)
		xpc_create_kthreads(ch, 1, 0);
}
/*
 * Attempt to establish a channel connection to a remote partition.
 */
static enum xp_retval
xpc_connect_channel(struct xpc_channel *ch)
{
	unsigned long irq_flags;
	struct xpc_registration *registration = &xpc_registrations[ch->number];

	if (mutex_trylock(&registration->mutex) == 0)
		return xpRetry;

	if (!XPC_CHANNEL_REGISTERED(ch->number)) {
		mutex_unlock(&registration->mutex);
		return xpUnregistered;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	DBUG_ON(ch->flags & XPC_C_CONNECTED);
	DBUG_ON(ch->flags & XPC_C_OPENREQUEST);

	if (ch->flags & XPC_C_DISCONNECTING) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		mutex_unlock(&registration->mutex);
		return ch->reason;
	}

	/* add info from the channel connect registration to the channel */

	ch->kthreads_assigned_limit = registration->assigned_limit;
	ch->kthreads_idle_limit = registration->idle_limit;
	DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_idle) != 0);
	DBUG_ON(atomic_read(&ch->kthreads_active) != 0);

	ch->func = registration->func;
	DBUG_ON(registration->func == NULL);
	ch->key = registration->key;

	ch->local_nentries = registration->nentries;

	if (ch->flags & XPC_C_ROPENREQUEST) {
		if (registration->entry_size != ch->entry_size) {
			/* the local and remote sides aren't the same */

			/*
			 * Because XPC_DISCONNECT_CHANNEL() can block we're
			 * forced to up the registration sema before we unlock
			 * the channel lock. But that's okay here because we're
			 * done with the part that required the registration
			 * sema. XPC_DISCONNECT_CHANNEL() requires that the
			 * channel lock be locked and will unlock and relock
			 * the channel lock as needed.
			 */
			mutex_unlock(&registration->mutex);
			XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes,
					       &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return xpUnequalMsgSizes;
		}
	} else {
		ch->entry_size = registration->entry_size;

		XPC_SET_REASON(ch, 0, 0);
		ch->flags &= ~XPC_C_DISCONNECTED;

		atomic_inc(&xpc_partitions[ch->partid].nchannels_active);
	}

	mutex_unlock(&registration->mutex);

	/* initiate the connection */

	ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING);
	xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags);

	xpc_process_connect(ch, &irq_flags);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return xpSuccess;
}
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels.
	 *
	 * For each connected channel that has pending messages activate idle
	 * kthreads and/or create new kthreads as needed.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then deal
		 * with connecting or disconnecting the channel as required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags, this may involve
		 * the activation of kthreads to deliver any pending messages
		 * sent from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
	}
}
/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down. XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time. Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}
/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}
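
/*
 * A hypothetical usage sketch, not part of the original file: registration
 * is normally done through xpc_connect() (declared in xp.h), which records
 * the channel function and limits in xpc_registrations[] before this
 * function runs. MY_CH, the sizing values, and the kthread limits below are
 * made-up example values, and the argument order shown (channel #, function,
 * key, payload size, #entries, assigned/idle kthread limits) is an
 * assumption about xpc_connect():
 *
 *	static void
 *	my_ch_func(enum xp_retval reason, short partid, int ch_number,
 *		   void *data, void *key)
 *	{
 *		(handle xpConnected, xpMsgReceived, and disconnect reasons)
 *	}
 *
 *	ret = xpc_connect(MY_CH, my_ch_func, NULL, 64, 128, 4, 2);
 */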
void
xpc_connected_callout(struct xpc_channel *ch)
{
	/* let the registerer know that a connection has been established */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);

		ch->func(xpConnected, ch->partid, ch->number,
			 (void *)(u64)ch->local_nentries, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, "
			"partid=%d, channel=%d\n", ch->partid, ch->number);
	}
}
/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to teardown all current connections for the specified channel.
 *
 * Before returning xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down. So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *	ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	/* initiate the channel disconnect for every active partition */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];
			xpc_msgqueue_ref(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);

			if (!(ch->flags & XPC_C_DISCONNECTED)) {
				ch->flags |= XPC_C_WDISCONNECT;

				XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
						       &irq_flags);
			}

			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);
		}
	}

	xpc_disconnect_wait(ch_number);
}
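
/*
 * A hypothetical usage sketch, not part of the original file: unregistration
 * is normally done through xpc_disconnect() (declared in xp.h), which ends
 * up calling xpc_initiate_disconnect() and therefore blocks until every
 * partition's connection on the channel has been closed:
 *
 *	xpc_disconnect(MY_CH);
 *	(after this returns, my_ch_func() will receive no further callouts)
 */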
/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
void
xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason)
{
	/*
	 * Let the channel's registerer know that the channel is being
	 * disconnected. We don't want to do this if the registerer was never
	 * informed of a connection being made.
	 */

	if (ch->func != NULL) {
		dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);

		ch->func(reason, ch->partid, ch->number, NULL, ch->key);

		dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
			"channel=%d\n", reason, ch->partid, ch->number);
	}
}
/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;
	DEFINE_WAIT(wait);

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
	ret = schedule_timeout(1);
	finish_wait(&ch->msg_allocate_wq, &wait);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
				  flags, payload, payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}
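
/*
 * A hypothetical usage sketch, not part of the original file (MY_CH and the
 * message layout are made-up; the payload must fit the entry_size the
 * channel was registered with):
 *
 *	struct my_msg {
 *		u64 seq;
 *		char text[56];
 *	} msg = { .seq = 1, .text = "ping" };
 *
 *	ret = xpc_initiate_send(partid, MY_CH, XPC_NOWAIT,
 *				&msg, sizeof(msg));
 *	if (ret != xpSuccess)
 *		(handle xpTimeout, xpRetry, or a disconnect reason)
 */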
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * This routine will not wait for the message to be sent or received.
 *
 * Once the remote end of the channel has received the message, the function
 * passed as an argument to xpc_initiate_send_notify() will be called. This
 * allows the sender to free up or re-use any buffers referenced by the
 * message, but does NOT mean the message has been processed at the remote
 * end by a receiver.
 *
 * If this routine returns an error, the caller's function will NOT be called.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 *	func - function to call with asynchronous notification of message
 *	       receipt. THIS FUNCTION MUST BE NON-BLOCKING.
 *	key - user-defined key to be passed to the function when it's called.
 */
enum xp_retval
xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload,
			 u16 payload_size, xpc_notify_func func, void *key)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);
	DBUG_ON(func == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_arch_ops.send_payload(&part->channels[ch_number],
			  flags, payload, payload_size, XPC_N_CALL, func, key);
		xpc_part_deref(part);
	}
	return ret;
}
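
/*
 * A hypothetical usage sketch, not part of the original file. The notify
 * function must not block (see the comment above); once it runs, the buffer
 * passed to xpc_initiate_send_notify() may be reused or freed:
 *
 *	static void
 *	my_notify(enum xp_retval reason, short partid, int ch_number,
 *		  void *key)
 *	{
 *		(the buffer passed as key may be recycled here)
 *	}
 *
 *	ret = xpc_initiate_send_notify(partid, MY_CH, XPC_WAIT, buf,
 *				       buf_size, my_notify, buf);
 */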
/*
 * Deliver a message's payload to its intended recipient.
 */
void
xpc_deliver_payload(struct xpc_channel *ch)
{
	void *payload;

	payload = xpc_arch_ops.get_deliverable_payload(ch);
	if (payload != NULL) {

		/*
		 * This ref is taken to protect the payload itself from being
		 * freed before the user is finished with it, which the user
		 * indicates by calling xpc_initiate_received().
		 */
		xpc_msgqueue_ref(ch);

		atomic_inc(&ch->kthreads_active);

		if (ch->func != NULL) {
			dev_dbg(xpc_chan, "ch->func() called, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);

			/* deliver the message to its intended recipient */
			ch->func(xpMsgReceived, ch->partid, ch->number, payload,
				 ch->key);

			dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p "
				"partid=%d channel=%d\n", payload, ch->partid,
				ch->number);
		}

		atomic_dec(&ch->kthreads_active);
	}
}
/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_arch_ops.received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload()  */
	xpc_msgqueue_deref(ch);
}
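
/*
 * A hypothetical usage sketch, not part of the original file: a registered
 * channel function acknowledges each delivered payload once it is done with
 * it, dropping the reference taken in xpc_deliver_payload(). Callers would
 * typically reach this through a wrapper in xp.h rather than directly:
 *
 *	static void
 *	my_ch_func(enum xp_retval reason, short partid, int ch_number,
 *		   void *data, void *key)
 *	{
 *		if (reason == xpMsgReceived) {
 *			(consume the payload pointed to by data)
 *			xpc_initiate_received(partid, ch_number, data);
 *		}
 *	}
 */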