// ChannelEventQueue.h
  1. /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
  2. */
  3. /* This Source Code Form is subject to the terms of the Mozilla Public
  4. * License, v. 2.0. If a copy of the MPL was not distributed with this
  5. * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
  6. #ifndef mozilla_net_ChannelEventQueue_h
  7. #define mozilla_net_ChannelEventQueue_h
  8. #include "nsTArray.h"
  9. #include "nsAutoPtr.h"
  10. #include "mozilla/Mutex.h"
  11. #include "mozilla/UniquePtr.h"
  12. class nsISupports;
  13. class nsIEventTarget;
  14. namespace mozilla {
  15. namespace net {
// A single deferred channel callback.  Subclasses implement Run() to perform
// the buffered work; instances are tallied with MOZ_COUNT_CTOR/DTOR for leak
// diagnostics.
class ChannelEvent
{
public:
  ChannelEvent() { MOZ_COUNT_CTOR(ChannelEvent); }
  virtual ~ChannelEvent() { MOZ_COUNT_DTOR(ChannelEvent); }
  // Executes the deferred work.  Called when the owning queue runs the event
  // directly or flushes it from the queue.
  virtual void Run() = 0;
};
  23. // Workaround for Necko re-entrancy dangers. We buffer IPDL messages in a
  24. // queue if still dispatching previous one(s) to listeners/observers.
  25. // Otherwise synchronous XMLHttpRequests and/or other code that spins the
  26. // event loop (ex: IPDL rpc) could cause listener->OnDataAvailable (for
  27. // instance) to be dispatched and called before mListener->OnStartRequest has
  28. // completed.
class ChannelEventQueue final
{
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ChannelEventQueue)

public:
  // @param owner - the object (typically the channel) this queue belongs to.
  //   Held as a raw pointer to avoid a refcount cycle; see mOwner below.
  explicit ChannelEventQueue(nsISupports *owner)
    : mSuspendCount(0)
    , mSuspended(false)
    , mForced(false)
    , mFlushing(false)
    , mOwner(owner)
    , mMutex("ChannelEventQueue::mMutex")
  {}

  // Puts IPDL-generated channel event into queue, to be run later
  // automatically when EndForcedQueueing and/or Resume is called.
  // If no queuing condition holds, the event is run immediately instead.
  //
  // @param aCallback - the ChannelEvent (ownership is taken).
  // @param aAssertionWhenNotQueued - this optional param will be used in an
  //   assertion when the event is executed directly.
  inline void RunOrEnqueue(ChannelEvent* aCallback,
                           bool aAssertionWhenNotQueued = false);

  // Moves the given events to the front of the queue, in order, ahead of
  // anything already queued.  Returns NS_ERROR_OUT_OF_MEMORY on allocation
  // failure.
  inline nsresult PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents);

  // After StartForcedQueueing is called, RunOrEnqueue() will start enqueuing
  // events that will be run/flushed when EndForcedQueueing is called.
  // - Note: queueing may still be required after EndForcedQueueing() (if the
  //   queue is suspended, etc): always call RunOrEnqueue() to avoid race
  //   conditions.
  inline void StartForcedQueueing();
  inline void EndForcedQueueing();

  // Suspend/resume event queue. RunOrEnqueue() will start enqueuing
  // events and they will be run/flushed when resume is called. These should be
  // called when the channel owning the event queue is suspended/resumed.
  // Suspend() nests: each call bumps mSuspendCount.
  inline void Suspend();
  // Resume flushes the queue asynchronously, i.e. items in queue will be
  // dispatched in a new event on the current thread.
  void Resume();

  // Retargets delivery of events to the target thread specified.
  nsresult RetargetDeliveryTo(nsIEventTarget* aTargetThread);

private:
  // Private destructor, to discourage deletion outside of Release():
  ~ChannelEventQueue()
  {
  }

  // Calls FlushQueue() unless forced queuing, an in-progress flush, or
  // suspension makes that unsafe, or the queue is empty.
  inline void MaybeFlushQueue();
  void FlushQueue();
  // Clears mSuspended (unless the channel was re-suspended meanwhile) and
  // then tries to flush.  Defined in this header; invoked on Resume's behalf.
  inline void CompleteResume();

  ChannelEvent* TakeEvent();

  // Buffered events, run in FIFO order when flushed.
  nsTArray<UniquePtr<ChannelEvent>> mEventQueue;

  uint32_t mSuspendCount;  // Number of Suspend() calls not yet resumed.
  bool mSuspended;         // Logically suspended; forces queuing.
  bool mForced;            // Between Start/EndForcedQueueing; forces queuing.
  bool mFlushing;          // Forces queuing; presumably set by FlushQueue()
                           // while it drains events -- confirm in the .cpp.
  // Keep ptr to avoid refcount cycle: only grab ref during flushing.
  nsISupports *mOwner;

  // Guards mEventQueue and the flags above; every inline method in this
  // header takes it before touching them.
  Mutex mMutex;

  // EventTarget for delivery of events to the correct thread.
  nsCOMPtr<nsIEventTarget> mTargetThread;

  friend class AutoEventEnqueuer;
};
  87. inline void
  88. ChannelEventQueue::RunOrEnqueue(ChannelEvent* aCallback,
  89. bool aAssertionWhenNotQueued)
  90. {
  91. MOZ_ASSERT(aCallback);
  92. // To avoid leaks.
  93. UniquePtr<ChannelEvent> event(aCallback);
  94. {
  95. MutexAutoLock lock(mMutex);
  96. bool enqueue = mForced || mSuspended || mFlushing;
  97. MOZ_ASSERT(enqueue == true || mEventQueue.IsEmpty(),
  98. "Should always enqueue if ChannelEventQueue not empty");
  99. if (enqueue) {
  100. mEventQueue.AppendElement(Move(event));
  101. return;
  102. }
  103. }
  104. MOZ_RELEASE_ASSERT(!aAssertionWhenNotQueued);
  105. event->Run();
  106. }
  107. inline void
  108. ChannelEventQueue::StartForcedQueueing()
  109. {
  110. MutexAutoLock lock(mMutex);
  111. mForced = true;
  112. }
  113. inline void
  114. ChannelEventQueue::EndForcedQueueing()
  115. {
  116. {
  117. MutexAutoLock lock(mMutex);
  118. mForced = false;
  119. }
  120. MaybeFlushQueue();
  121. }
  122. inline nsresult
  123. ChannelEventQueue::PrependEvents(nsTArray<UniquePtr<ChannelEvent>>& aEvents)
  124. {
  125. MutexAutoLock lock(mMutex);
  126. UniquePtr<ChannelEvent>* newEvents =
  127. mEventQueue.InsertElementsAt(0, aEvents.Length());
  128. if (!newEvents) {
  129. return NS_ERROR_OUT_OF_MEMORY;
  130. }
  131. for (uint32_t i = 0; i < aEvents.Length(); i++) {
  132. newEvents[i] = Move(aEvents[i]);
  133. }
  134. return NS_OK;
  135. }
  136. inline void
  137. ChannelEventQueue::Suspend()
  138. {
  139. MutexAutoLock lock(mMutex);
  140. mSuspended = true;
  141. mSuspendCount++;
  142. }
  143. inline void
  144. ChannelEventQueue::CompleteResume()
  145. {
  146. {
  147. MutexAutoLock lock(mMutex);
  148. // channel may have been suspended again since Resume fired event to call
  149. // this.
  150. if (!mSuspendCount) {
  151. // we need to remain logically suspended (for purposes of queuing incoming
  152. // messages) until this point, else new incoming messages could run before
  153. // queued ones.
  154. mSuspended = false;
  155. }
  156. }
  157. MaybeFlushQueue();
  158. }
  159. inline void
  160. ChannelEventQueue::MaybeFlushQueue()
  161. {
  162. // Don't flush if forced queuing on, we're already being flushed, or
  163. // suspended, or there's nothing to flush
  164. bool flushQueue = false;
  165. {
  166. MutexAutoLock lock(mMutex);
  167. flushQueue = !mForced && !mFlushing && !mSuspended &&
  168. !mEventQueue.IsEmpty();
  169. }
  170. if (flushQueue) {
  171. FlushQueue();
  172. }
  173. }
  174. // Ensures that RunOrEnqueue() will be collecting events during its lifetime
  175. // (letting caller know incoming IPDL msgs should be queued). Flushes the queue
  176. // when it goes out of scope.
  177. class MOZ_STACK_CLASS AutoEventEnqueuer
  178. {
  179. public:
  180. explicit AutoEventEnqueuer(ChannelEventQueue *queue) : mEventQueue(queue) {
  181. mEventQueue->StartForcedQueueing();
  182. }
  183. ~AutoEventEnqueuer() {
  184. mEventQueue->EndForcedQueueing();
  185. }
  186. private:
  187. RefPtr<ChannelEventQueue> mEventQueue;
  188. };
  189. } // namespace net
  190. } // namespace mozilla
  191. #endif