callback.c
  1. /*
  2. * Facility for queueing callback functions to be run from the
  3. * top-level event loop after the current top-level activity finishes.
  4. */
  5. #include <stddef.h>
  6. #include "putty.h"
/*
 * One queued callback: the function to run, its context argument,
 * and the link to the next entry in the singly-linked queue.
 */
struct callback {
    struct callback *next;
    toplevel_callback_fn_t fn;
    void *ctx;
};
/*
 * Queue state: cbhead/cbtail are the ends of the pending-callback
 * list; cbcurr is the callback currently executing (detached from the
 * list while it runs), or NULL if none is in progress.
 */
static struct callback *cbcurr = NULL, *cbhead = NULL, *cbtail = NULL;
/* Front-end hook called when the queue becomes non-empty, if requested. */
static toplevel_callback_notify_fn_t notify_frontend = NULL;
static void *notify_ctx = NULL;
  15. void request_callback_notifications(toplevel_callback_notify_fn_t fn,
  16. void *ctx)
  17. {
  18. notify_frontend = fn;
  19. notify_ctx = ctx;
  20. }
  21. static void run_idempotent_callback(void *ctx)
  22. {
  23. struct IdempotentCallback *ic = (struct IdempotentCallback *)ctx;
  24. ic->queued = false;
  25. ic->fn(ic->ctx);
  26. }
  27. void queue_idempotent_callback(struct IdempotentCallback *ic)
  28. {
  29. if (ic->queued)
  30. return;
  31. ic->queued = true;
  32. queue_toplevel_callback(run_idempotent_callback, ic);
  33. }
  34. void delete_callbacks_for_context(void *ctx)
  35. {
  36. struct callback *newhead, *newtail;
  37. newhead = newtail = NULL;
  38. while (cbhead) {
  39. struct callback *cb = cbhead;
  40. cbhead = cbhead->next;
  41. if (cb->ctx == ctx ||
  42. (cb->fn == run_idempotent_callback &&
  43. ((struct IdempotentCallback *)cb->ctx)->ctx == ctx)) {
  44. sfree(cb);
  45. } else {
  46. if (!newhead)
  47. newhead = cb;
  48. else
  49. newtail->next = cb;
  50. newtail = cb;
  51. }
  52. }
  53. cbhead = newhead;
  54. cbtail = newtail;
  55. if (newtail)
  56. newtail->next = NULL;
  57. }
  58. void queue_toplevel_callback(toplevel_callback_fn_t fn, void *ctx)
  59. {
  60. struct callback *cb;
  61. cb = snew(struct callback);
  62. cb->fn = fn;
  63. cb->ctx = ctx;
  64. /*
  65. * If the front end has requested notification of pending
  66. * callbacks, and we didn't already have one queued, let it know
  67. * we do have one now.
  68. *
  69. * If cbcurr is non-NULL, i.e. we are actually in the middle of
  70. * executing a callback right now, then we count that as the queue
  71. * already having been non-empty. That saves the front end getting
  72. * a constant stream of needless re-notifications if the last
  73. * callback keeps re-scheduling itself.
  74. */
  75. if (notify_frontend && !cbhead && !cbcurr)
  76. notify_frontend(notify_ctx);
  77. if (cbtail)
  78. cbtail->next = cb;
  79. else
  80. cbhead = cb;
  81. cbtail = cb;
  82. cb->next = NULL;
  83. }
  84. bool run_toplevel_callbacks(void)
  85. {
  86. bool done_something = false;
  87. if (cbhead) {
  88. /*
  89. * Transfer the head callback into cbcurr to indicate that
  90. * it's being executed. Then operations which transform the
  91. * queue, like delete_callbacks_for_context, can proceed as if
  92. * it's not there.
  93. */
  94. cbcurr = cbhead;
  95. cbhead = cbhead->next;
  96. if (!cbhead)
  97. cbtail = NULL;
  98. /*
  99. * Now run the callback, and then clear it out of cbcurr.
  100. */
  101. cbcurr->fn(cbcurr->ctx);
  102. sfree(cbcurr);
  103. cbcurr = NULL;
  104. done_something = true;
  105. }
  106. return done_something;
  107. }
  108. bool toplevel_callback_pending(void)
  109. {
  110. return cbcurr != NULL || cbhead != NULL;
  111. }