
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/**
 * This header contains various SurfaceFilter implementations that apply
 * transformations to image data, for usage with SurfacePipe.
 */

#ifndef mozilla_image_SurfaceFilters_h
#define mozilla_image_SurfaceFilters_h

#include <algorithm>
#include <stdint.h>
#include <string.h>

#include "mozilla/Likely.h"
#include "mozilla/Maybe.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/gfx/2D.h"

#include "DownscalingFilter.h"
#include "SurfaceCache.h"
#include "SurfacePipe.h"

namespace mozilla {
namespace image {

//////////////////////////////////////////////////////////////////////////////
// DeinterlacingFilter
//////////////////////////////////////////////////////////////////////////////

template <typename PixelType, typename Next> class DeinterlacingFilter;

/**
 * A configuration struct for DeinterlacingFilter.
 *
 * The 'PixelType' template parameter should be either uint32_t (for output to a
 * SurfaceSink) or uint8_t (for output to a PalettedSurfaceSink).
 */
template <typename PixelType>
struct DeinterlacingConfig
{
  template <typename Next> using Filter = DeinterlacingFilter<PixelType, Next>;
  bool mProgressiveDisplay; /// If true, duplicate rows during deinterlacing
                            /// to make progressive display look better, at
                            /// the cost of some performance.
};
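
// Illustrative sketch (not part of this header): constructing a
// DeinterlacingConfig for a truecolor (uint32_t) pipe. The only data member is
// |mProgressiveDisplay|; the aggregate initialization below is one way a
// caller might fill it in.
//
//   DeinterlacingConfig<uint32_t> deinterlacingConfig { /* mProgressiveDisplay */ true };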

/**
 * DeinterlacingFilter performs deinterlacing by reordering the rows that are
 * written to it.
 *
 * The 'PixelType' template parameter should be either uint32_t (for output to a
 * SurfaceSink) or uint8_t (for output to a PalettedSurfaceSink).
 *
 * The 'Next' template parameter specifies the next filter in the chain.
 */
template <typename PixelType, typename Next>
class DeinterlacingFilter final : public SurfaceFilter
{
public:
  DeinterlacingFilter()
    : mInputRow(0)
    , mOutputRow(0)
    , mPass(0)
    , mProgressiveDisplay(true)
  { }

  template <typename... Rest>
  nsresult Configure(const DeinterlacingConfig<PixelType>& aConfig, const Rest&... aRest)
  {
    nsresult rv = mNext.Configure(aRest...);
    if (NS_FAILED(rv)) {
      return rv;
    }

    if (sizeof(PixelType) == 1 && !mNext.IsValidPalettedPipe()) {
      NS_WARNING("Paletted DeinterlacingFilter used with non-paletted pipe?");
      return NS_ERROR_INVALID_ARG;
    }
    if (sizeof(PixelType) == 4 && mNext.IsValidPalettedPipe()) {
      NS_WARNING("Non-paletted DeinterlacingFilter used with paletted pipe?");
      return NS_ERROR_INVALID_ARG;
    }

    gfx::IntSize outputSize = mNext.InputSize();
    mProgressiveDisplay = aConfig.mProgressiveDisplay;

    const uint32_t bufferSize = outputSize.width *
                                outputSize.height *
                                sizeof(PixelType);

    // Use the size of the SurfaceCache as a heuristic to avoid gigantic
    // allocations. Even if DownscalingFilter allowed us to allocate space for
    // the output image, the deinterlacing buffer may still be too big, and
    // fallible allocation won't always save us in the presence of overcommit.
    if (!SurfaceCache::CanHold(bufferSize)) {
      return NS_ERROR_OUT_OF_MEMORY;
    }

    // Allocate the buffer, which contains deinterlaced scanlines of the image.
    // The buffer is necessary so that we can output rows which have already
    // been deinterlaced again on subsequent passes. Since a later stage in the
    // pipeline may be transforming the rows it receives (for example, by
    // downscaling them), the rows may no longer exist in their original form on
    // the surface itself.
    mBuffer.reset(new (fallible) uint8_t[bufferSize]);
    if (MOZ_UNLIKELY(!mBuffer)) {
      return NS_ERROR_OUT_OF_MEMORY;
    }

    // Clear the buffer to avoid writing uninitialized memory to the output.
    memset(mBuffer.get(), 0, bufferSize);

    ConfigureFilter(outputSize, sizeof(PixelType));
    return NS_OK;
  }
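
  // Illustrative sketch of how configuration chains through the pipeline: each
  // filter consumes its own config and forwards the rest to |mNext|. The
  // |NextFilter| and |nextConfig| names below are placeholders, not types
  // defined in this header.
  //
  //   DeinterlacingFilter<uint32_t, NextFilter> filter;
  //   nsresult rv = filter.Configure(DeinterlacingConfig<uint32_t> { /* mProgressiveDisplay */ true },
  //                                  nextConfig /* consumed by NextFilter */);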

  bool IsValidPalettedPipe() const override
  {
    return sizeof(PixelType) == 1 && mNext.IsValidPalettedPipe();
  }

  Maybe<SurfaceInvalidRect> TakeInvalidRect() override
  {
    return mNext.TakeInvalidRect();
  }

protected:
  uint8_t* DoResetToFirstRow() override
  {
    mNext.ResetToFirstRow();
    mPass = 0;
    mInputRow = 0;
    mOutputRow = InterlaceOffset(mPass);
    return GetRowPointer(mOutputRow);
  }

  uint8_t* DoAdvanceRow() override
  {
    if (mPass >= 4) {
      return nullptr; // We already finished all passes.
    }
    if (mInputRow >= InputSize().height) {
      return nullptr; // We already got all the input rows we expect.
    }

    // Duplicate from the first Haeberli row to the remaining Haeberli rows
    // within the buffer.
    DuplicateRows(HaeberliOutputStartRow(mPass, mProgressiveDisplay, mOutputRow),
                  HaeberliOutputUntilRow(mPass, mProgressiveDisplay,
                                         InputSize(), mOutputRow));

    // Write the current set of Haeberli rows (which contains the current row)
    // to the next stage in the pipeline.
    OutputRows(HaeberliOutputStartRow(mPass, mProgressiveDisplay, mOutputRow),
               HaeberliOutputUntilRow(mPass, mProgressiveDisplay,
                                      InputSize(), mOutputRow));

    // Determine which output row the next input row corresponds to.
    bool advancedPass = false;
    uint32_t stride = InterlaceStride(mPass);
    int32_t nextOutputRow = mOutputRow + stride;
    while (nextOutputRow >= InputSize().height) {
      // Copy any remaining rows from the buffer.
      if (!advancedPass) {
        OutputRows(HaeberliOutputUntilRow(mPass, mProgressiveDisplay,
                                          InputSize(), mOutputRow),
                   InputSize().height);
      }

      // We finished the current pass; advance to the next one.
      mPass++;
      if (mPass >= 4) {
        return nullptr; // Finished all passes.
      }

      // Tell the next pipeline stage that we're starting the next pass.
      mNext.ResetToFirstRow();

      // Update our state to reflect the pass change.
      advancedPass = true;
      stride = InterlaceStride(mPass);
      nextOutputRow = InterlaceOffset(mPass);
    }

    MOZ_ASSERT(nextOutputRow >= 0);
    MOZ_ASSERT(nextOutputRow < InputSize().height);

    MOZ_ASSERT(HaeberliOutputStartRow(mPass, mProgressiveDisplay,
                                      nextOutputRow) >= 0);
    MOZ_ASSERT(HaeberliOutputStartRow(mPass, mProgressiveDisplay,
                                      nextOutputRow) < InputSize().height);
    MOZ_ASSERT(HaeberliOutputStartRow(mPass, mProgressiveDisplay,
                                      nextOutputRow) <= nextOutputRow);

    MOZ_ASSERT(HaeberliOutputUntilRow(mPass, mProgressiveDisplay,
                                      InputSize(), nextOutputRow) >= 0);
    MOZ_ASSERT(HaeberliOutputUntilRow(mPass, mProgressiveDisplay,
                                      InputSize(), nextOutputRow)
                 <= InputSize().height);
    MOZ_ASSERT(HaeberliOutputUntilRow(mPass, mProgressiveDisplay,
                                      InputSize(), nextOutputRow)
                 > nextOutputRow);

    int32_t nextHaeberliOutputRow =
      HaeberliOutputStartRow(mPass, mProgressiveDisplay, nextOutputRow);

    // Copy rows from the buffer until we reach the desired output row.
    if (advancedPass) {
      OutputRows(0, nextHaeberliOutputRow);
    } else {
      OutputRows(HaeberliOutputUntilRow(mPass, mProgressiveDisplay,
                                        InputSize(), mOutputRow),
                 nextHaeberliOutputRow);
    }

    // Update our position within the buffer.
    mInputRow++;
    mOutputRow = nextOutputRow;

    // We'll actually write to the first Haeberli output row, then copy it until
    // we reach the last Haeberli output row. The assertions above make sure
    // this always includes mOutputRow.
    return GetRowPointer(nextHaeberliOutputRow);
  }

private:
  static uint32_t InterlaceOffset(uint32_t aPass)
  {
    MOZ_ASSERT(aPass < 4, "Invalid pass");
    static const uint8_t offset[] = { 0, 4, 2, 1 };
    return offset[aPass];
  }

  static uint32_t InterlaceStride(uint32_t aPass)
  {
    MOZ_ASSERT(aPass < 4, "Invalid pass");
    static const uint8_t stride[] = { 8, 8, 4, 2 };
    return stride[aPass];
  }
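
  // Illustrative example of the row order these tables produce (this is the
  // standard GIF interlacing scheme): pass N covers rows InterlaceOffset(N),
  // InterlaceOffset(N) + InterlaceStride(N), and so on. For a 10-row image,
  // pass 0 writes rows 0 and 8; pass 1 writes row 4; pass 2 writes rows 2 and
  // 6; pass 3 writes rows 1, 3, 5, 7, and 9.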

  static int32_t HaeberliOutputStartRow(uint32_t aPass,
                                        bool aProgressiveDisplay,
                                        int32_t aOutputRow)
  {
    MOZ_ASSERT(aPass < 4, "Invalid pass");
    static const uint8_t firstRowOffset[] = { 3, 1, 0, 0 };
    if (aProgressiveDisplay) {
      return std::max(aOutputRow - firstRowOffset[aPass], 0);
    } else {
      return aOutputRow;
    }
  }

  static int32_t HaeberliOutputUntilRow(uint32_t aPass,
                                        bool aProgressiveDisplay,
                                        const gfx::IntSize& aInputSize,
                                        int32_t aOutputRow)
  {
    MOZ_ASSERT(aPass < 4, "Invalid pass");
    static const uint8_t lastRowOffset[] = { 4, 2, 1, 0 };
    if (aProgressiveDisplay) {
      return std::min(aOutputRow + lastRowOffset[aPass],
                      aInputSize.height - 1)
             + 1; // Add one because this is an open interval on the right.
    } else {
      return aOutputRow + 1;
    }
  }
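
  // Worked example of the Haeberli row range for progressive display: on
  // pass 0, output row 8 in a 20-row image yields
  // HaeberliOutputStartRow == max(8 - 3, 0) == 5 and
  // HaeberliOutputUntilRow == min(8 + 4, 19) + 1 == 13. The incoming row is
  // written at buffer row 5 and duplicated through row 12, so the block reads
  // as a solid band until later passes overwrite it.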

  void DuplicateRows(int32_t aStart, int32_t aUntil)
  {
    MOZ_ASSERT(aStart >= 0);
    MOZ_ASSERT(aUntil >= 0);

    if (aUntil <= aStart || aStart >= InputSize().height) {
      return;
    }

    // The source row is the first row in the range.
    const uint8_t* sourceRowPointer = GetRowPointer(aStart);

    // We duplicate the source row into each subsequent row in the range.
    for (int32_t destRow = aStart + 1 ; destRow < aUntil ; ++destRow) {
      uint8_t* destRowPointer = GetRowPointer(destRow);
      memcpy(destRowPointer, sourceRowPointer, InputSize().width * sizeof(PixelType));
    }
  }

  void OutputRows(int32_t aStart, int32_t aUntil)
  {
    MOZ_ASSERT(aStart >= 0);
    MOZ_ASSERT(aUntil >= 0);

    if (aUntil <= aStart || aStart >= InputSize().height) {
      return;
    }

    for (int32_t rowToOutput = aStart; rowToOutput < aUntil; ++rowToOutput) {
      mNext.WriteBuffer(reinterpret_cast<PixelType*>(GetRowPointer(rowToOutput)));
    }
  }

  uint8_t* GetRowPointer(uint32_t aRow) const
  {
    uint32_t offset = aRow * InputSize().width * sizeof(PixelType);
    MOZ_ASSERT(offset < InputSize().width * InputSize().height * sizeof(PixelType),
               "Start of row is outside of image");
    MOZ_ASSERT(offset + InputSize().width * sizeof(PixelType)
                 <= InputSize().width * InputSize().height * sizeof(PixelType),
               "End of row is outside of image");
    return mBuffer.get() + offset;
  }

  Next mNext;                    /// The next SurfaceFilter in the chain.
  UniquePtr<uint8_t[]> mBuffer;  /// The buffer used to store reordered rows.
  int32_t mInputRow;             /// The current row we're reading. (0-indexed)
  int32_t mOutputRow;            /// The current row we're writing. (0-indexed)
  uint8_t mPass;                 /// Which pass we're on. (0-indexed)
  bool mProgressiveDisplay;      /// If true, duplicate rows to optimize for
                                 /// progressive display.
};

//////////////////////////////////////////////////////////////////////////////
// RemoveFrameRectFilter
//////////////////////////////////////////////////////////////////////////////

template <typename Next> class RemoveFrameRectFilter;

/**
 * A configuration struct for RemoveFrameRectFilter.
 */
struct RemoveFrameRectConfig
{
  template <typename Next> using Filter = RemoveFrameRectFilter<Next>;
  gfx::IntRect mFrameRect; /// The surface subrect which contains data.
};
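
// Illustrative sketch (not part of this header): a decoder whose frame covers
// only part of the logical image might build the config like this. The
// coordinates are made up for the example.
//
//   RemoveFrameRectConfig frameRectConfig { gfx::IntRect(16, 16, 64, 64) };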

/**
 * RemoveFrameRectFilter turns an image with a frame rect that does not match
 * its logical size into an image with no frame rect. It does this by writing
 * transparent pixels into any padding regions and throwing away excess data.
 *
 * The 'Next' template parameter specifies the next filter in the chain.
 */
template <typename Next>
class RemoveFrameRectFilter final : public SurfaceFilter
{
public:
  RemoveFrameRectFilter()
    : mRow(0)
  { }

  template <typename... Rest>
  nsresult Configure(const RemoveFrameRectConfig& aConfig, const Rest&... aRest)
  {
    nsresult rv = mNext.Configure(aRest...);
    if (NS_FAILED(rv)) {
      return rv;
    }

    if (mNext.IsValidPalettedPipe()) {
      NS_WARNING("RemoveFrameRectFilter used with paletted pipe?");
      return NS_ERROR_INVALID_ARG;
    }

    mFrameRect = mUnclampedFrameRect = aConfig.mFrameRect;
    gfx::IntSize outputSize = mNext.InputSize();

    // Forbid frame rects with negative size.
    if (aConfig.mFrameRect.width < 0 || aConfig.mFrameRect.height < 0) {
      return NS_ERROR_INVALID_ARG;
    }

    // Clamp mFrameRect to the output size.
    gfx::IntRect outputRect(0, 0, outputSize.width, outputSize.height);
    mFrameRect = mFrameRect.Intersect(outputRect);

    // If there's no intersection, |mFrameRect| will be an empty rect positioned
    // at the maximum of |inputRect|'s and |aFrameRect|'s coordinates, which is
    // not what we want. Force it to (0, 0) in that case.
    if (mFrameRect.IsEmpty()) {
      mFrameRect.MoveTo(0, 0);
    }

    // We don't need an intermediate buffer unless the unclamped frame rect
    // width is larger than the clamped frame rect width. In that case, the
    // caller will end up writing data that won't end up in the final image at
    // all, and we'll need a buffer to give that data a place to go.
    if (mFrameRect.width < mUnclampedFrameRect.width) {
      mBuffer.reset(new (fallible) uint8_t[mUnclampedFrameRect.width *
                                           sizeof(uint32_t)]);
      if (MOZ_UNLIKELY(!mBuffer)) {
        return NS_ERROR_OUT_OF_MEMORY;
      }

      memset(mBuffer.get(), 0, mUnclampedFrameRect.width * sizeof(uint32_t));
    }

    ConfigureFilter(mUnclampedFrameRect.Size(), sizeof(uint32_t));
    return NS_OK;
  }
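
  // Worked example of the clamping above: with a 32x32 output and an unclamped
  // frame rect of (-8, -8, 48, 48), Intersect() yields the clamped rect
  // (0, 0, 32, 32). Since the clamped width (32) is smaller than the unclamped
  // width (48), we allocate an intermediate row buffer of 48 * sizeof(uint32_t)
  // bytes so the caller's extra pixels have somewhere to go.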

  Maybe<SurfaceInvalidRect> TakeInvalidRect() override
  {
    return mNext.TakeInvalidRect();
  }

protected:
  uint8_t* DoResetToFirstRow() override
  {
    uint8_t* rowPtr = mNext.ResetToFirstRow();
    if (rowPtr == nullptr) {
      mRow = mFrameRect.YMost();
      return nullptr;
    }

    mRow = mUnclampedFrameRect.y;

    // Advance the next pipeline stage to the beginning of the frame rect,
    // outputting blank rows.
    if (mFrameRect.y > 0) {
      for (int32_t rowToOutput = 0; rowToOutput < mFrameRect.y ; ++rowToOutput) {
        mNext.WriteEmptyRow();
      }
    }

    // We're at the beginning of the frame rect now, so return if we're either
    // ready for input or we're already done.
    rowPtr = mBuffer ? mBuffer.get() : mNext.CurrentRowPointer();
    if (!mFrameRect.IsEmpty() || rowPtr == nullptr) {
      // Note that the pointer we're returning is for the next row we're
      // actually going to write to, but we may discard writes before that point
      // if mRow < mFrameRect.y.
      return AdjustRowPointer(rowPtr);
    }

    // We've finished the region specified by the frame rect, but the frame rect
    // is empty, so we need to output the rest of the image immediately. Advance
    // to the end of the next pipeline stage's buffer, outputting blank rows.
    while (mNext.WriteEmptyRow() == WriteState::NEED_MORE_DATA) { }

    mRow = mFrameRect.YMost();
    return nullptr; // We're done.
  }

  uint8_t* DoAdvanceRow() override
  {
    uint8_t* rowPtr = nullptr;

    const int32_t currentRow = mRow;
    mRow++;

    if (currentRow < mFrameRect.y) {
      // This row is outside of the frame rect, so just drop it on the floor.
      rowPtr = mBuffer ? mBuffer.get() : mNext.CurrentRowPointer();
      return AdjustRowPointer(rowPtr);
    } else if (currentRow >= mFrameRect.YMost()) {
      NS_WARNING("RemoveFrameRectFilter: Advancing past end of frame rect");
      return nullptr;
    }

    // If we had to buffer, copy the data. Otherwise, just advance the row.
    if (mBuffer) {
      // We write from the beginning of the buffer unless |mUnclampedFrameRect.x|
      // is negative; if that's the case, we have to skip the portion of the
      // unclamped frame rect that's outside the row.
      uint32_t* source = reinterpret_cast<uint32_t*>(mBuffer.get()) -
                         std::min(mUnclampedFrameRect.x, 0);

      // We write |mFrameRect.width| columns starting at |mFrameRect.x|; we've
      // already clamped these values to the size of the output, so we don't
      // have to worry about bounds checking here (though WriteBuffer() will do
      // it for us in any case).
      WriteState state = mNext.WriteBuffer(source, mFrameRect.x, mFrameRect.width);

      rowPtr = state == WriteState::NEED_MORE_DATA ? mBuffer.get()
                                                   : nullptr;
    } else {
      rowPtr = mNext.AdvanceRow();
    }

    // If there's still more data coming or we're already done, just adjust the
    // pointer and return.
    if (mRow < mFrameRect.YMost() || rowPtr == nullptr) {
      return AdjustRowPointer(rowPtr);
    }

    // We've finished the region specified by the frame rect. Advance to the end
    // of the next pipeline stage's buffer, outputting blank rows.
    while (mNext.WriteEmptyRow() == WriteState::NEED_MORE_DATA) { }

    mRow = mFrameRect.YMost();
    return nullptr; // We're done.
  }

private:
  uint8_t* AdjustRowPointer(uint8_t* aNextRowPointer) const
  {
    if (mBuffer) {
      MOZ_ASSERT(aNextRowPointer == mBuffer.get() || aNextRowPointer == nullptr);
      return aNextRowPointer; // No adjustment needed for an intermediate buffer.
    }

    if (mFrameRect.IsEmpty() ||
        mRow >= mFrameRect.YMost() ||
        aNextRowPointer == nullptr) {
      return nullptr; // Nothing left to write.
    }

    return aNextRowPointer + mFrameRect.x * sizeof(uint32_t);
  }

  Next mNext;                        /// The next SurfaceFilter in the chain.

  gfx::IntRect mFrameRect;           /// The surface subrect which contains data,
                                     /// clamped to the image size.
  gfx::IntRect mUnclampedFrameRect;  /// The frame rect before clamping.
  UniquePtr<uint8_t[]> mBuffer;      /// The intermediate buffer, if one is
                                     /// necessary because the frame rect width
                                     /// is larger than the image's logical width.
  int32_t mRow;                      /// The row in unclamped frame rect space
                                     /// that we're currently writing.
};

//////////////////////////////////////////////////////////////////////////////
// ADAM7InterpolatingFilter
//////////////////////////////////////////////////////////////////////////////

template <typename Next> class ADAM7InterpolatingFilter;

/**
 * A configuration struct for ADAM7InterpolatingFilter.
 */
struct ADAM7InterpolatingConfig
{
  template <typename Next> using Filter = ADAM7InterpolatingFilter<Next>;
};

/**
 * ADAM7InterpolatingFilter performs bilinear interpolation over an ADAM7
 * interlaced image.
 *
 * ADAM7 breaks up the image into 8x8 blocks. On each of the 7 passes, a new set
 * of pixels in each block receives their final values, according to the
 * following pattern:
 *
 *    1 6 4 6 2 6 4 6
 *    7 7 7 7 7 7 7 7
 *    5 6 5 6 5 6 5 6
 *    7 7 7 7 7 7 7 7
 *    3 6 4 6 3 6 4 6
 *    7 7 7 7 7 7 7 7
 *    5 6 5 6 5 6 5 6
 *    7 7 7 7 7 7 7 7
 *
 * When rendering the pixels that have not yet received their final values, we
 * can get much better intermediate results if we interpolate between
 * the pixels we *have* gotten so far. This filter performs bilinear
 * interpolation by first performing linear interpolation horizontally for each
 * "important" row (which we'll define as a row that has received any pixels
 * with final values at all) and then performing linear interpolation vertically
 * to produce pixel values for rows which aren't important on the current pass.
 *
 * Note that this filter totally ignores the data which is written to rows which
 * aren't important on the current pass! It's fine to write nothing at all for
 * these rows, and writing something is harmless too, since the data is simply
 * discarded.
 *
 * XXX(seth): In bug 1280552 we'll add a SIMD implementation for this filter.
 *
 * The 'Next' template parameter specifies the next filter in the chain.
 */
template <typename Next>
class ADAM7InterpolatingFilter final : public SurfaceFilter
{
public:
  ADAM7InterpolatingFilter()
    : mPass(0)  // The current pass, in the range 1..7. Starts at 0 so that
                // DoResetToFirstRow() doesn't have to special case the first pass.
    , mRow(0)
  { }

  template <typename... Rest>
  nsresult Configure(const ADAM7InterpolatingConfig& aConfig, const Rest&... aRest)
  {
    nsresult rv = mNext.Configure(aRest...);
    if (NS_FAILED(rv)) {
      return rv;
    }

    if (mNext.IsValidPalettedPipe()) {
      NS_WARNING("ADAM7InterpolatingFilter used with paletted pipe?");
      return NS_ERROR_INVALID_ARG;
    }

    // We have two intermediate buffers, one for the previous row with final
    // pixel values and one for the row that the previous filter in the chain is
    // currently writing to.
    size_t inputWidthInBytes = mNext.InputSize().width * sizeof(uint32_t);

    mPreviousRow.reset(new (fallible) uint8_t[inputWidthInBytes]);
    if (MOZ_UNLIKELY(!mPreviousRow)) {
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mCurrentRow.reset(new (fallible) uint8_t[inputWidthInBytes]);
    if (MOZ_UNLIKELY(!mCurrentRow)) {
      return NS_ERROR_OUT_OF_MEMORY;
    }

    memset(mPreviousRow.get(), 0, inputWidthInBytes);
    memset(mCurrentRow.get(), 0, inputWidthInBytes);

    ConfigureFilter(mNext.InputSize(), sizeof(uint32_t));
    return NS_OK;
  }

  Maybe<SurfaceInvalidRect> TakeInvalidRect() override
  {
    return mNext.TakeInvalidRect();
  }

protected:
  uint8_t* DoResetToFirstRow() override
  {
    mRow = 0;
    mPass = std::min(mPass + 1, 7);

    uint8_t* rowPtr = mNext.ResetToFirstRow();
    if (mPass == 7) {
      // Short circuit this filter on the final pass, since all pixels have
      // their final values at that point.
      return rowPtr;
    }

    return mCurrentRow.get();
  }

  uint8_t* DoAdvanceRow() override
  {
    MOZ_ASSERT(0 < mPass && mPass <= 7, "Invalid pass");

    int32_t currentRow = mRow;
    ++mRow;

    if (mPass == 7) {
      // On the final pass we short circuit this filter totally.
      return mNext.AdvanceRow();
    }

    const int32_t lastImportantRow = LastImportantRow(InputSize().height, mPass);
    if (currentRow > lastImportantRow) {
      return nullptr; // This pass is already complete.
    }

    if (!IsImportantRow(currentRow, mPass)) {
      // We just ignore whatever the caller gives us for these rows. We'll
      // interpolate them in later.
      return mCurrentRow.get();
    }

    // This is an important row. We need to perform horizontal interpolation for
    // these rows.
    InterpolateHorizontally(mCurrentRow.get(), InputSize().width, mPass);

    // Interpolate vertically between the previous important row and the current
    // important row. We skip this if the current row is 0 (which is always an
    // important row), because in that case there is no previous important row
    // to interpolate with.
    if (currentRow != 0) {
      InterpolateVertically(mPreviousRow.get(), mCurrentRow.get(), mPass, mNext);
    }

    // Write out the current row itself, which, being an important row, does not
    // need vertical interpolation.
    uint32_t* currentRowAsPixels = reinterpret_cast<uint32_t*>(mCurrentRow.get());
    mNext.WriteBuffer(currentRowAsPixels);

    if (currentRow == lastImportantRow) {
      // This is the last important row, which completes this pass. Note that
      // for very small images, this may be the first row! Since there won't be
      // another important row, there's nothing to interpolate with vertically,
      // so we just duplicate this row until the end of the image.
      while (mNext.WriteBuffer(currentRowAsPixels) == WriteState::NEED_MORE_DATA) { }

      // All of the remaining rows in the image were determined above, so we're done.
      return nullptr;
    }

    // The current row is now the previous important row; save it.
    Swap(mPreviousRow, mCurrentRow);

    MOZ_ASSERT(mRow < InputSize().height, "Reached the end of the surface without "
                                          "hitting the last important row?");

    return mCurrentRow.get();
  }

private:
  static void InterpolateVertically(uint8_t* aPreviousRow,
                                    uint8_t* aCurrentRow,
                                    uint8_t aPass,
                                    SurfaceFilter& aNext)
  {
    const float* weights = InterpolationWeights(ImportantRowStride(aPass));

    // We need to interpolate vertically to generate the rows between the
    // previous important row and the next one. Recall that important rows are
    // rows which contain at least some final pixels; see
    // InterpolateHorizontally() for some additional explanation as to what that
    // means. Note that we've already written out the previous important row, so
    // we start the iteration at 1.
    for (int32_t outRow = 1; outRow < ImportantRowStride(aPass); ++outRow) {
      const float weight = weights[outRow];

      // We iterate through the previous and current important row every time we
      // write out an interpolated row, so we need to copy the pointers.
      uint8_t* prevRowBytes = aPreviousRow;
      uint8_t* currRowBytes = aCurrentRow;

      // Write out the interpolated pixels. Interpolation is componentwise.
      aNext.template WritePixelsToRow<uint32_t>([&]{
        uint32_t pixel = 0;
        auto* component = reinterpret_cast<uint8_t*>(&pixel);
        *component++ = InterpolateByte(*prevRowBytes++, *currRowBytes++, weight);
        *component++ = InterpolateByte(*prevRowBytes++, *currRowBytes++, weight);
        *component++ = InterpolateByte(*prevRowBytes++, *currRowBytes++, weight);
        *component++ = InterpolateByte(*prevRowBytes++, *currRowBytes++, weight);
        return AsVariant(pixel);
      });
    }
  }
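
  // Worked example: on pass 1 the important row stride is 8, so between
  // important rows 0 and 8 this writes interpolated rows 1..7. Row 3 uses
  // weights[3] == 5/8, i.e. each byte is 5/8 of the row above plus 3/8 of the
  // row below, matching its relative distance from the two important rows.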

  static void InterpolateHorizontally(uint8_t* aRow, int32_t aWidth, uint8_t aPass)
  {
    // Collect the data we'll need to perform horizontal interpolation. The
    // terminology here bears some explanation: a "final pixel" is a pixel which
    // has received its final value. On each pass, a new set of pixels receives
    // their final value; see the diagram above of the 8x8 pattern that ADAM7
    // uses. Any pixel which hasn't received its final value on this pass
    // derives its value from either horizontal or vertical interpolation
    // instead.
    const size_t finalPixelStride = FinalPixelStride(aPass);
    const size_t finalPixelStrideBytes = finalPixelStride * sizeof(uint32_t);
    const size_t lastFinalPixel = LastFinalPixel(aWidth, aPass);
    const size_t lastFinalPixelBytes = lastFinalPixel * sizeof(uint32_t);
    const float* weights = InterpolationWeights(finalPixelStride);

    // Interpolate blocks of pixels which lie between two final pixels.
    // Horizontal interpolation is done in place, as we'll need the results
    // later when we vertically interpolate.
    for (size_t blockBytes = 0;
         blockBytes < lastFinalPixelBytes;
         blockBytes += finalPixelStrideBytes) {
      uint8_t* finalPixelA = aRow + blockBytes;
      uint8_t* finalPixelB = aRow + blockBytes + finalPixelStrideBytes;

      MOZ_ASSERT(finalPixelA < aRow + aWidth * sizeof(uint32_t),
                 "Running off end of buffer");
      MOZ_ASSERT(finalPixelB < aRow + aWidth * sizeof(uint32_t),
                 "Running off end of buffer");

      // Interpolate the individual pixels componentwise. Note that we start
      // iteration at 1 since we don't need to apply any interpolation to the
      // first pixel in the block, which has its final value.
      for (size_t pixelIndex = 1; pixelIndex < finalPixelStride; ++pixelIndex) {
        const float weight = weights[pixelIndex];
        uint8_t* pixel = aRow + blockBytes + pixelIndex * sizeof(uint32_t);

        MOZ_ASSERT(pixel < aRow + aWidth * sizeof(uint32_t), "Running off end of buffer");

        for (size_t component = 0; component < sizeof(uint32_t); ++component) {
          pixel[component] =
            InterpolateByte(finalPixelA[component], finalPixelB[component], weight);
        }
      }
    }

    // For the pixels after the last final pixel in the row, there isn't a
    // second final pixel to interpolate with, so just duplicate.
    uint32_t* rowPixels = reinterpret_cast<uint32_t*>(aRow);
    uint32_t pixelToDuplicate = rowPixels[lastFinalPixel];

    for (int32_t pixelIndex = lastFinalPixel + 1;
         pixelIndex < aWidth;
         ++pixelIndex) {
      MOZ_ASSERT(pixelIndex < aWidth, "Running off end of buffer");
      rowPixels[pixelIndex] = pixelToDuplicate;
    }
  }
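
  // Worked example: for a 20-pixel-wide row on pass 1, the final pixel stride
  // is 8 and the last final pixel is column 16. Columns 1..7 are interpolated
  // between final pixels 0 and 8, columns 9..15 between final pixels 8 and 16,
  // and columns 17..19 simply duplicate the value of column 16.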

  static uint8_t InterpolateByte(uint8_t aByteA, uint8_t aByteB, float aWeight)
  {
    return uint8_t(aByteA * aWeight + aByteB * (1.0f - aWeight));
  }
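
  // For example, InterpolateByte(200, 40, 0.75f) == uint8_t(150.0f + 10.0f) == 160:
  // the first byte contributes with weight |aWeight| and the second with the
  // remaining 1 - |aWeight|.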

  static int32_t ImportantRowStride(uint8_t aPass)
  {
    MOZ_ASSERT(0 < aPass && aPass <= 7, "Invalid pass");

    // The stride between important rows for each pass, with a dummy value for
    // the nonexistent pass 0.
    static int32_t strides[] = { 1, 8, 8, 4, 4, 2, 2, 1 };

    return strides[aPass];
  }

  static bool IsImportantRow(int32_t aRow, uint8_t aPass)
  {
    MOZ_ASSERT(aRow >= 0);

    // Whether the row is important comes down to divisibility by the stride for
    // this pass, which is always a power of 2, so we can check using a mask.
    int32_t mask = ImportantRowStride(aPass) - 1;
    return (aRow & mask) == 0;
  }

  static int32_t LastImportantRow(int32_t aHeight, uint8_t aPass)
  {
    MOZ_ASSERT(aHeight > 0);

    // We can find the last important row using the same mask trick as above.
    int32_t lastRow = aHeight - 1;
    int32_t mask = ImportantRowStride(aPass) - 1;
    return lastRow - (lastRow & mask);
  }
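
  // Worked example of the mask trick: on pass 3 the stride is 4, so the mask is
  // 3 (0b11). Row 21 is not important because 21 & 3 == 1, and for a 100-row
  // image the last important row is 99 - (99 & 3) == 96.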

  static size_t FinalPixelStride(uint8_t aPass)
  {
    MOZ_ASSERT(0 < aPass && aPass <= 7, "Invalid pass");

    // The stride between the final pixels in important rows for each pass, with
    // a dummy value for the nonexistent pass 0.
    static size_t strides[] = { 1, 8, 4, 4, 2, 2, 1, 1 };

    return strides[aPass];
  }

  static size_t LastFinalPixel(int32_t aWidth, uint8_t aPass)
  {
    MOZ_ASSERT(aWidth >= 0);

    // Again, we can use the mask trick above to find the last important pixel.
    int32_t lastColumn = aWidth - 1;
    size_t mask = FinalPixelStride(aPass) - 1;
    return lastColumn - (lastColumn & mask);
  }

  static const float* InterpolationWeights(int32_t aStride)
  {
    // Precalculated interpolation weights. These are used to interpolate
    // between final pixels or between important rows. Although no interpolation
    // is actually applied to the previous final pixel or important row value,
    // the arrays still start with 1.0f, which is always skipped, primarily
    // because otherwise |stride1Weights| would have zero elements.
    static float stride8Weights[] =
      { 1.0f, 7 / 8.0f, 6 / 8.0f, 5 / 8.0f, 4 / 8.0f, 3 / 8.0f, 2 / 8.0f, 1 / 8.0f };
    static float stride4Weights[] = { 1.0f, 3 / 4.0f, 2 / 4.0f, 1 / 4.0f };
    static float stride2Weights[] = { 1.0f, 1 / 2.0f };
    static float stride1Weights[] = { 1.0f };

    switch (aStride) {
      case 8: return stride8Weights;
      case 4: return stride4Weights;
      case 2: return stride2Weights;
      case 1: return stride1Weights;
      default: MOZ_CRASH();
    }
  }

  Next mNext;                         /// The next SurfaceFilter in the chain.

  UniquePtr<uint8_t[]> mPreviousRow;  /// The last important row (i.e., row with
                                      /// final pixel values) that got written to.
  UniquePtr<uint8_t[]> mCurrentRow;   /// The row that's being written to right
                                      /// now.
  uint8_t mPass;                      /// Which ADAM7 pass we're on. Valid passes
                                      /// are 1..7 during processing and 0 prior
                                      /// to configuration.
  int32_t mRow;                       /// The row we're currently reading.
};

} // namespace image
} // namespace mozilla

#endif // mozilla_image_SurfaceFilters_h