SourceBuffer.cpp

/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "SourceBuffer.h"

#include <algorithm>
#include <cmath>
#include <cstring>

#include "mozilla/Likely.h"
#include "nsIInputStream.h"
#include "MainThreadUtils.h"
#include "SurfaceCache.h"

using std::max;
using std::min;

namespace mozilla {
namespace image {

//////////////////////////////////////////////////////////////////////////////
// SourceBufferIterator implementation.
//////////////////////////////////////////////////////////////////////////////

SourceBufferIterator::~SourceBufferIterator()
{
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }
}

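// Move assignment: release our reference to our current owner (if any) so its
// consumer count stays balanced, then take over aOther's owner and iteration
// state.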
SourceBufferIterator&
SourceBufferIterator::operator=(SourceBufferIterator&& aOther)
{
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }

  mOwner = Move(aOther.mOwner);
  mState = aOther.mState;
  mData = aOther.mData;
  mChunkCount = aOther.mChunkCount;
  mByteCount = aOther.mByteCount;

  return *this;
}

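// Drives the iterator's state machine. We first consume the bytes handed out
// by the previous advance, then try to satisfy the new request from the local
// buffer (READY). If the local buffer is empty, we defer to the owning
// SourceBuffer, which returns READY, WAITING (aConsumer will be resumed when
// more data arrives), or COMPLETE.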
SourceBufferIterator::State
SourceBufferIterator::AdvanceOrScheduleResume(size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MOZ_ASSERT(mOwner);

  if (MOZ_UNLIKELY(!HasMore())) {
    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
    return COMPLETE;
  }

  // The range of data [mOffset, mOffset + mNextReadLength) has just been read
  // by the caller (or at least they don't have any interest in it), so consume
  // that data.
  MOZ_ASSERT(mData.mIterating.mNextReadLength <= mData.mIterating.mAvailableLength);
  mData.mIterating.mOffset += mData.mIterating.mNextReadLength;
  mData.mIterating.mAvailableLength -= mData.mIterating.mNextReadLength;
  mData.mIterating.mNextReadLength = 0;

  if (MOZ_LIKELY(mState == READY)) {
    // If the caller wants zero bytes of data, that's easy enough; we just
    // configured ourselves for a zero-byte read above! In theory we could do
    // this even in the START state, but it's not important for performance and
    // breaking the ability of callers to assert that the pointer returned by
    // Data() is non-null doesn't seem worth it.
    if (aRequestedBytes == 0) {
      MOZ_ASSERT(mData.mIterating.mNextReadLength == 0);
      return READY;
    }

    // Try to satisfy the request out of our local buffer. This is potentially
    // much faster than requesting data from our owning SourceBuffer because we
    // don't have to take the lock. Note that if we have anything at all in our
    // local buffer, we use it to satisfy the request; @aRequestedBytes is just
    // the *maximum* number of bytes we can return.
    if (mData.mIterating.mAvailableLength > 0) {
      return AdvanceFromLocalBuffer(aRequestedBytes);
    }
  }

  // Our local buffer is empty, so we'll have to request data from our owning
  // SourceBuffer.
  return mOwner->AdvanceIteratorOrScheduleResume(*this,
                                                 aRequestedBytes,
                                                 aConsumer);
}

bool
SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const
{
  MOZ_ASSERT(mOwner);
  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
}

//////////////////////////////////////////////////////////////////////////////
// SourceBuffer implementation.
//////////////////////////////////////////////////////////////////////////////

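// Out-of-line definition for the static member declared (with its value) in
// SourceBuffer.h; prior to C++17 this is required for ODR-uses of
// MIN_CHUNK_CAPACITY to link.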
const size_t SourceBuffer::MIN_CHUNK_CAPACITY;

SourceBuffer::SourceBuffer()
  : mMutex("image::SourceBuffer")
  , mConsumerCount(0)
{ }

SourceBuffer::~SourceBuffer()
{
  MOZ_ASSERT(mConsumerCount == 0,
             "SourceBuffer destroyed with active consumers");
}

nsresult
SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk)
{
  mMutex.AssertCurrentThreadOwns();

#ifdef DEBUG
  if (mChunks.Length() > 0) {
    NS_WARNING("Appending an extra chunk for SourceBuffer");
  }
#endif

  if (MOZ_UNLIKELY(!aChunk)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(!mChunks.AppendElement(Move(*aChunk), fallible))) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  return NS_OK;
}

Maybe<SourceBuffer::Chunk>
SourceBuffer::CreateChunk(size_t aCapacity, bool aRoundUp /* = true */)
{
  if (MOZ_UNLIKELY(aCapacity == 0)) {
    MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
    return Nothing();
  }

  // Round up if requested.
  size_t finalCapacity = aRoundUp ? RoundedUpCapacity(aCapacity)
                                  : aCapacity;

  // Use the size of the SurfaceCache as an additional heuristic to avoid
  // allocating huge buffers. Generally images do not get smaller when decoded,
  // so if we couldn't store the source data in the SurfaceCache, we assume
  // that there's no way we'll be able to store the decoded version.
  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(finalCapacity))) {
    NS_WARNING("SourceBuffer refused to create chunk too large for SurfaceCache");
    return Nothing();
  }

  return Some(Chunk(finalCapacity));
}

nsresult
SourceBuffer::Compact()
{
  mMutex.AssertCurrentThreadOwns();
  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }
  // If we have exactly one chunk and it has no excess capacity, then there's
  // nothing to gain by compacting.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }
  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0 ; i < mChunks.Length() ; ++i) {
    length += mChunks[i].Length();
  }

  // If our total length is zero (which means ExpectLength() got called, but no
  // data ever actually got written) then just empty our chunk list.
  if (MOZ_UNLIKELY(length == 0)) {
    mChunks.Clear();
    return NS_OK;
  }

  Maybe<Chunk> newChunk = CreateChunk(length, /* aRoundUp = */ false);
  if (MOZ_UNLIKELY(!newChunk || newChunk->AllocationFailed())) {
    NS_WARNING("Failed to allocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the new chunk.
  for (uint32_t i = 0 ; i < mChunks.Length() ; ++i) {
    size_t offset = newChunk->Length();
    MOZ_ASSERT(offset < newChunk->Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= newChunk->Capacity());

    memcpy(newChunk->Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    newChunk->AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(newChunk->Length() == newChunk->Capacity(),
             "Compacted chunk has slack space");

  // Replace the old chunks with the new, compact chunk.
  mChunks.Clear();
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(newChunk))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }
  mChunks.Compact();

  return NS_OK;
}

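// A worked example of the rounding below (a sketch, assuming
// MIN_CHUNK_CAPACITY is the 4096-byte page size):
//
//   RoundedUpCapacity(1)    == 4096
//   RoundedUpCapacity(4096) == 4096
//   RoundedUpCapacity(5000) == 8192   // (5000 + 4095) & ~4095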
/* static */ size_t
SourceBuffer::RoundedUpCapacity(size_t aCapacity)
{
  // Protect against overflow.
  if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
    return aCapacity;
  }

  // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
  // size of a page).
  size_t roundedCapacity =
    (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
  MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
  MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");

  return roundedCapacity;
}

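// Each new chunk's capacity is the sum of the previous two, so chunk sizes
// grow like the Fibonacci sequence. For example, starting from a 4096-byte
// chunk: 4096, 8192, 12288, 20480, 32768, ... This keeps the number of
// chunks (and thus iteration and compaction costs) logarithmic in the total
// amount of data buffered.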
size_t
SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity)
{
  mMutex.AssertCurrentThreadOwns();

  // We grow the source buffer using a Fibonacci growth rate.
  size_t length = mChunks.Length();

  if (length == 0) {
    return aMinCapacity;
  }

  if (length == 1) {
    return max(2 * mChunks[0].Capacity(), aMinCapacity);
  }

  return max(mChunks[length - 1].Capacity() + mChunks[length - 2].Capacity(),
             aMinCapacity);
}

void
SourceBuffer::AddWaitingConsumer(IResumable* aConsumer)
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(!mStatus, "Waiting when we're complete?");

  if (aConsumer) {
    mWaitingConsumers.AppendElement(aConsumer);
  }
}

void
SourceBuffer::ResumeWaitingConsumers()
{
  mMutex.AssertCurrentThreadOwns();

  if (mWaitingConsumers.Length() == 0) {
    return;
  }

  for (uint32_t i = 0 ; i < mWaitingConsumers.Length() ; ++i) {
    mWaitingConsumers[i]->Resume();
  }

  mWaitingConsumers.Clear();
}

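// The producer-side flow, as implemented below: call ExpectLength() once with
// the expected total size if it's known up front (an optional hint that lets
// us allocate a single right-sized chunk), then Append() or
// AppendFromInputStream() as data arrives, and finally Complete() with a
// success or failure status. A minimal sketch (variable names are
// placeholders):
//
//   buffer->ExpectLength(totalBytes);  // optional
//   buffer->Append(data, dataLength);  // repeat as data streams in
//   buffer->Complete(NS_OK);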
nsresult
SourceBuffer::ExpectLength(size_t aExpectedLength)
{
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aExpectedLength))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}

nsresult
SourceBuffer::Append(const char* aData, size_t aLength)
{
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity = forNextChunk > 0
                      ? FibonacciCapacityWithMinimum(forNextChunk)
                      : 0;
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");

    nextChunk = CreateChunk(nextChunkCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(Move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}

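// A callback for nsIInputStream::ReadSegments(), matching the
// nsWriteSegmentFun signature; aClosure is the SourceBuffer we're filling.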
static nsresult
AppendToSourceBuffer(nsIInputStream*,
                     void* aClosure,
                     const char* aFromRawSegment,
                     uint32_t,
                     uint32_t aCount,
                     uint32_t* aWriteCount)
{
  SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);

  // Copy the source data. Unless we hit OOM, we squelch the return value here,
  // because returning an error means that ReadSegments stops reading data, and
  // we want to ensure that we read everything we get. If we hit OOM then we
  // return a failed status to the caller.
  nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
  if (rv == NS_ERROR_OUT_OF_MEMORY) {
    return rv;
  }

  // Report that we wrote everything we got.
  *aWriteCount = aCount;

  return NS_OK;
}

nsresult
SourceBuffer::AppendFromInputStream(nsIInputStream* aInputStream,
                                    uint32_t aCount)
{
  uint32_t bytesRead;
  nsresult rv = aInputStream->ReadSegments(AppendToSourceBuffer, this,
                                           aCount, &bytesRead);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (bytesRead == 0) {
    // The loading of the image has been canceled.
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(bytesRead == aCount,
             "AppendToSourceBuffer should consume everything");

  return rv;
}

void
SourceBuffer::Complete(nsresult aStatus)
{
  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
    return;
  }

  if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
    // It's illegal to succeed without writing anything.
    aStatus = NS_ERROR_FAILURE;
  }

  mStatus = Some(aStatus);

  // Resume any waiting consumers now that we're complete.
  ResumeWaitingConsumers();

  // If we still have active consumers, just return.
  if (mConsumerCount > 0) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool
SourceBuffer::IsComplete()
{
  MutexAutoLock lock(mMutex);
  return bool(mStatus);
}

size_t
SourceBuffer::SizeOfIncludingThisWithComputedFallback(MallocSizeOf
                                                        aMallocSizeOf) const
{
  MutexAutoLock lock(mMutex);

  size_t n = aMallocSizeOf(this);
  n += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);

  for (uint32_t i = 0 ; i < mChunks.Length() ; ++i) {
    size_t chunkSize = aMallocSizeOf(mChunks[i].Data());

    if (chunkSize == 0) {
      // We're on a platform where moz_malloc_size_of always returns 0.
      chunkSize = mChunks[i].Capacity();
    }

    n += chunkSize;
  }

  return n;
}

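// The consumer-side flow: each consumer obtains its own iterator, which
// registers it as a consumer so that compaction is deferred until every
// iterator has been released. A minimal sketch of a read loop (assuming the
// SourceBufferIterator API declared in SourceBuffer.h, where Data() and
// Length() describe the bytes made available by the last advance; names like
// bytesWanted and ProcessBytes are placeholders):
//
//   SourceBufferIterator iterator = buffer->Iterator();
//   while (iterator.AdvanceOrScheduleResume(bytesWanted, consumer)
//            == SourceBufferIterator::READY) {
//     ProcessBytes(iterator.Data(), iterator.Length());
//   }
//   // WAITING: @consumer will be Resume()'d when more data arrives.
//   // COMPLETE: the buffer is done, successfully or not.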
SourceBufferIterator
SourceBuffer::Iterator()
{
  {
    MutexAutoLock lock(mMutex);
    mConsumerCount++;
  }

  return SourceBufferIterator(this);
}

void
SourceBuffer::OnIteratorRelease()
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(mConsumerCount > 0, "Consumer count doesn't add up");
  mConsumerCount--;

  // If we still have active consumers, or we're not complete yet, then return.
  if (mConsumerCount > 0 || !mStatus) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool
SourceBuffer::RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
                                         size_t aBytes) const
{
  MutexAutoLock lock(mMutex);

  // If we're not complete, we always say no.
  if (!mStatus) {
    return false;
  }

  // If the iterator's at the end, the answer is trivial.
  if (!aIterator.HasMore()) {
    return true;
  }

  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
  size_t iteratorLength = aIterator.mData.mIterating.mAvailableLength;

  // Include the bytes the iterator is currently pointing to in the limit, so
  // that the current chunk doesn't have to be a special case.
  size_t bytes = aBytes + iteratorOffset + iteratorLength;

  // Count the length over all of our chunks, starting with the one that the
  // iterator is currently pointing to. (This is O(N), but N is expected to be
  // ~1, so it doesn't seem worth caching the length separately.)
  size_t lengthSoFar = 0;
  for (uint32_t i = iteratorChunk ; i < mChunks.Length() ; ++i) {
    lengthSoFar += mChunks[i].Length();
    if (lengthSoFar > bytes) {
      return false;
    }
  }

  return true;
}

SourceBufferIterator::State
SourceBuffer::AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
                                              size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aIterator.HasMore(), "Advancing a completed iterator and "
                                  "AdvanceOrScheduleResume didn't catch it");

  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
    // This SourceBuffer is complete due to an error; all reads fail.
    return aIterator.SetComplete(*mStatus);
  }

  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
    // We haven't gotten an initial chunk yet.
    AddWaitingConsumer(aConsumer);
    return aIterator.SetWaiting();
  }

  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());

  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
                       aIterator.mData.mIterating.mAvailableLength;
  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());

  if (iteratorEnd < currentChunk.Length()) {
    // There's more data in the current chunk.
    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
                              iteratorEnd, currentChunk.Length() - iteratorEnd,
                              aRequestedBytes);
  }

  if (iteratorEnd == currentChunk.Capacity() &&
      !IsLastChunk(iteratorChunkIdx)) {
    // Advance to the next chunk.
    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
                              nextChunk.Length(), aRequestedBytes);
  }

  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");

  if (mStatus) {
    // There's no more data and this SourceBuffer completed successfully.
    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
    return aIterator.SetComplete(*mStatus);
  }

  // We're not complete, but there's no more data right now. Arrange to wake up
  // the consumer when we get more data.
  AddWaitingConsumer(aConsumer);
  return aIterator.SetWaiting();
}

nsresult
SourceBuffer::HandleError(nsresult aError)
{
  MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
  MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY,
             "Unexpected error; may want to notify waiting readers, which "
             "HandleError currently doesn't do");
  mMutex.AssertCurrentThreadOwns();

  NS_WARNING("SourceBuffer encountered an unrecoverable error");

  // Record the error.
  mStatus = Some(aError);

  // Drop our references to waiting readers.
  mWaitingConsumers.Clear();

  return *mStatus;
}

bool
SourceBuffer::IsEmpty()
{
  mMutex.AssertCurrentThreadOwns();
  return mChunks.Length() == 0 ||
         mChunks[0].Length() == 0;
}

bool
SourceBuffer::IsLastChunk(uint32_t aChunk)
{
  mMutex.AssertCurrentThreadOwns();
  return aChunk + 1 == mChunks.Length();
}

} // namespace image
} // namespace mozilla