convert_test.cc
/*
 * Copyright 2011 The LibYuv Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */
#include <stdlib.h>
#include <time.h>
#include "libyuv/basic_types.h"
#include "libyuv/compare.h"
#include "libyuv/convert.h"
#include "libyuv/convert_argb.h"
#include "libyuv/convert_from.h"
#include "libyuv/convert_from_argb.h"
#include "libyuv/cpu_id.h"
#ifdef HAVE_JPEG
#include "libyuv/mjpeg_decoder.h"
#endif
#include "libyuv/planar_functions.h"
#include "libyuv/rotate.h"
#include "libyuv/video_common.h"
#include "../unit_test/unit_test.h"

namespace libyuv {

#define SUBSAMPLE(v, a) ((((v) + (a) - 1)) / (a))
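// Note on rounding: SUBSAMPLE rounds up, e.g. SUBSAMPLE(5, 2) == 3, so odd
// widths and heights still get a full chroma plane.
// Each TESTPLANARTOP invocation below expands to four tests (_Any, _Unaligned,
// _Invert, _Opt). Each test runs the planar-to-planar conversion once with CPU
// features masked off (disable_cpu_flags_) and benchmark_iterations_ times
// with the optimized path, then compares the Y, U and V planes of both results.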
#define TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
align_buffer_page_end(src_u, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
align_buffer_page_end(src_v, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
align_buffer_page_end(dst_y_c, kWidth * kHeight); \
align_buffer_page_end(dst_u_c, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_v_c, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_y_opt, kWidth * kHeight); \
align_buffer_page_end(dst_u_opt, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_v_opt, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
(fastrand() & 0xff); \
src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
(fastrand() & 0xff); \
} \
} \
memset(dst_y_c, 1, kWidth * kHeight); \
memset(dst_u_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_v_c, 3, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_y_opt, 101, kWidth * kHeight); \
memset(dst_u_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_v_opt, 103, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
MaskCpuFlags(disable_cpu_flags_); \
SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
src_u + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
src_v + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
dst_y_c, kWidth, \
dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
src_u + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
src_v + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
dst_y_opt, kWidth, \
dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
kWidth, NEG kHeight); \
} \
int max_diff = 0; \
for (int i = 0; i < kHeight; ++i) { \
for (int j = 0; j < kWidth; ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
static_cast<int>(dst_y_opt[i * kWidth + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_EQ(0, max_diff); \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_u_c[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
static_cast<int>(dst_u_opt[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 3); \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_v_c[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
static_cast<int>(dst_v_opt[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 3); \
free_aligned_buffer_page_end(dst_y_c); \
free_aligned_buffer_page_end(dst_u_c); \
free_aligned_buffer_page_end(dst_v_c); \
free_aligned_buffer_page_end(dst_y_opt); \
free_aligned_buffer_page_end(dst_u_opt); \
free_aligned_buffer_page_end(dst_v_opt); \
free_aligned_buffer_page_end(src_y); \
free_aligned_buffer_page_end(src_u); \
free_aligned_buffer_page_end(src_v); \
}
#define TESTPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_ - 4, _Any, +, 0) \
TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Unaligned, +, 1) \
TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Invert, -, 0) \
TESTPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Opt, +, 0)
TESTPLANARTOP(I420, 2, 2, I420, 2, 2)
TESTPLANARTOP(I422, 2, 1, I420, 2, 2)
TESTPLANARTOP(I444, 1, 1, I420, 2, 2)
TESTPLANARTOP(I411, 4, 1, I420, 2, 2)
TESTPLANARTOP(I420, 2, 2, I422, 2, 1)
TESTPLANARTOP(I420, 2, 2, I444, 1, 1)
TESTPLANARTOP(I420, 2, 2, I411, 4, 1)
TESTPLANARTOP(I420, 2, 2, I420Mirror, 2, 2)
TESTPLANARTOP(I422, 2, 1, I422, 2, 1)
TESTPLANARTOP(I444, 1, 1, I444, 1, 1)
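// Planar YUV to biplanar (interleaved UV) conversions, e.g. I420 to NV12/NV21.
// The Y plane and the interleaved UV plane are each compared with a tolerance
// of 1 between the C and optimized paths.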
#define TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
align_buffer_page_end(src_u, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
align_buffer_page_end(src_v, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
align_buffer_page_end(dst_y_c, kWidth * kHeight); \
align_buffer_page_end(dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_y_opt, kWidth * kHeight); \
align_buffer_page_end(dst_uv_opt, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
for (int j = 0; j < SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
src_u[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
(fastrand() & 0xff); \
src_v[(i * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
(fastrand() & 0xff); \
} \
} \
memset(dst_y_c, 1, kWidth * kHeight); \
memset(dst_uv_c, 2, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_y_opt, 101, kWidth * kHeight); \
memset(dst_uv_opt, 102, SUBSAMPLE(kWidth * 2, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
MaskCpuFlags(disable_cpu_flags_); \
SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
src_u + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
src_v + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
dst_y_c, kWidth, \
dst_uv_c, SUBSAMPLE(kWidth * 2, SUBSAMP_X), \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
src_u + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
src_v + OFF, \
SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
dst_y_opt, kWidth, \
dst_uv_opt, \
SUBSAMPLE(kWidth * 2, SUBSAMP_X), \
kWidth, NEG kHeight); \
} \
int max_diff = 0; \
for (int i = 0; i < kHeight; ++i) { \
for (int j = 0; j < kWidth; ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
static_cast<int>(dst_y_opt[i * kWidth + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 1); \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < SUBSAMPLE(kWidth * 2, SUBSAMP_X); ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_uv_c[i * \
SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j]) - \
static_cast<int>(dst_uv_opt[i * \
SUBSAMPLE(kWidth * 2, SUBSAMP_X) + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 1); \
free_aligned_buffer_page_end(dst_y_c); \
free_aligned_buffer_page_end(dst_uv_c); \
free_aligned_buffer_page_end(dst_y_opt); \
free_aligned_buffer_page_end(dst_uv_opt); \
free_aligned_buffer_page_end(src_y); \
free_aligned_buffer_page_end(src_u); \
free_aligned_buffer_page_end(src_v); \
}
#define TESTPLANARTOBP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_ - 4, _Any, +, 0) \
TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Unaligned, +, 1) \
TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Invert, -, 0) \
TESTPLANARTOBPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Opt, +, 0)
TESTPLANARTOBP(I420, 2, 2, NV12, 2, 2)
TESTPLANARTOBP(I420, 2, 2, NV21, 2, 2)
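// Biplanar to planar conversions (NV12/NV21 to I420): the interleaved source
// UV plane is split back into separate U and V planes, again comparing the C
// and optimized paths.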
#define TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, SRC_FMT_PLANAR##To##FMT_PLANAR##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
align_buffer_page_end(src_uv, 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X) * \
SUBSAMPLE(kHeight, SRC_SUBSAMP_Y) + OFF); \
align_buffer_page_end(dst_y_c, kWidth * kHeight); \
align_buffer_page_end(dst_u_c, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_v_c, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_y_opt, kWidth * kHeight); \
align_buffer_page_end(dst_u_opt, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_v_opt, \
SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
for (int i = 0; i < SUBSAMPLE(kHeight, SRC_SUBSAMP_Y); ++i) { \
for (int j = 0; j < 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X); ++j) { \
src_uv[(i * 2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X)) + j + OFF] = \
(fastrand() & 0xff); \
} \
} \
memset(dst_y_c, 1, kWidth * kHeight); \
memset(dst_u_c, 2, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_v_c, 3, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_y_opt, 101, kWidth * kHeight); \
memset(dst_u_opt, 102, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_v_opt, 103, SUBSAMPLE(kWidth, SUBSAMP_X) * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
MaskCpuFlags(disable_cpu_flags_); \
SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
src_uv + OFF, \
2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
dst_y_c, kWidth, \
dst_u_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
dst_v_c, SUBSAMPLE(kWidth, SUBSAMP_X), \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
SRC_FMT_PLANAR##To##FMT_PLANAR(src_y + OFF, kWidth, \
src_uv + OFF, \
2 * SUBSAMPLE(kWidth, SRC_SUBSAMP_X), \
dst_y_opt, kWidth, \
dst_u_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
dst_v_opt, SUBSAMPLE(kWidth, SUBSAMP_X), \
kWidth, NEG kHeight); \
} \
int max_diff = 0; \
for (int i = 0; i < kHeight; ++i) { \
for (int j = 0; j < kWidth; ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
static_cast<int>(dst_y_opt[i * kWidth + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 1); \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_u_c[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
static_cast<int>(dst_u_opt[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 1); \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < SUBSAMPLE(kWidth, SUBSAMP_X); ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_v_c[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j]) - \
static_cast<int>(dst_v_opt[i * \
SUBSAMPLE(kWidth, SUBSAMP_X) + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 1); \
free_aligned_buffer_page_end(dst_y_c); \
free_aligned_buffer_page_end(dst_u_c); \
free_aligned_buffer_page_end(dst_v_c); \
free_aligned_buffer_page_end(dst_y_opt); \
free_aligned_buffer_page_end(dst_u_opt); \
free_aligned_buffer_page_end(dst_v_opt); \
free_aligned_buffer_page_end(src_y); \
free_aligned_buffer_page_end(src_uv); \
}
#define TESTBIPLANARTOP(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_ - 4, _Any, +, 0) \
TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Unaligned, +, 1) \
TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Invert, -, 0) \
TESTBIPLANARTOPI(SRC_FMT_PLANAR, SRC_SUBSAMP_X, SRC_SUBSAMP_Y, \
FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Opt, +, 0)
TESTBIPLANARTOP(NV12, 2, 2, I420, 2, 2)
TESTBIPLANARTOP(NV21, 2, 2, I420, 2, 2)
#define ALIGNINT(V, ALIGN) (((V) + (ALIGN) - 1) / (ALIGN) * (ALIGN))
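// Planar YUV to packed pixel formats (ARGB, RGB565, YUY2, ...). Both the C and
// optimized outputs are first expanded to FMT_C (normally ARGB) so that packed
// 565/1555/4444 pixels can be compared per byte within the DIFF tolerance.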
#define TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
align_buffer_page_end(src_u, kSizeUV + OFF); \
align_buffer_page_end(src_v, kSizeUV + OFF); \
align_buffer_page_end(dst_argb_c, kStrideB * kHeight + OFF); \
align_buffer_page_end(dst_argb_opt, kStrideB * kHeight + OFF); \
for (int i = 0; i < kWidth * kHeight; ++i) { \
src_y[i + OFF] = (fastrand() & 0xff); \
} \
for (int i = 0; i < kSizeUV; ++i) { \
src_u[i + OFF] = (fastrand() & 0xff); \
src_v[i + OFF] = (fastrand() & 0xff); \
} \
memset(dst_argb_c + OFF, 1, kStrideB * kHeight); \
memset(dst_argb_opt + OFF, 101, kStrideB * kHeight); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
src_u + OFF, kStrideUV, \
src_v + OFF, kStrideUV, \
dst_argb_c + OFF, kStrideB, \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
src_u + OFF, kStrideUV, \
src_v + OFF, kStrideUV, \
dst_argb_opt + OFF, kStrideB, \
kWidth, NEG kHeight); \
} \
int max_diff = 0; \
/* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
align_buffer_page_end(dst_argb32_c, kWidth * BPP_C * kHeight); \
align_buffer_page_end(dst_argb32_opt, kWidth * BPP_C * kHeight); \
memset(dst_argb32_c, 2, kWidth * BPP_C * kHeight); \
memset(dst_argb32_opt, 102, kWidth * BPP_C * kHeight); \
FMT_B##To##FMT_C(dst_argb_c + OFF, kStrideB, \
dst_argb32_c, kWidth * BPP_C, \
kWidth, kHeight); \
FMT_B##To##FMT_C(dst_argb_opt + OFF, kStrideB, \
dst_argb32_opt, kWidth * BPP_C, \
kWidth, kHeight); \
for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) { \
int abs_diff = \
abs(static_cast<int>(dst_argb32_c[i]) - \
static_cast<int>(dst_argb32_opt[i])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_page_end(src_y); \
free_aligned_buffer_page_end(src_u); \
free_aligned_buffer_page_end(src_v); \
free_aligned_buffer_page_end(dst_argb_c); \
free_aligned_buffer_page_end(dst_argb_opt); \
free_aligned_buffer_page_end(dst_argb32_c); \
free_aligned_buffer_page_end(dst_argb32_opt); \
}
#define TESTPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, DIFF, FMT_C, BPP_C) \
TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C, BPP_C) \
TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C, BPP_C) \
TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C, BPP_C) \
TESTPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)
TESTPLANARTOB(I420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H420, 2, 2, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RAW, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB24, 3, 3, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB1555, 2, 2, 1, 9, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, ARGB4444, 2, 2, 1, 17, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H422, 2, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(H422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, BGRA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, RGBA, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I411, 4, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(J444, 1, 1, ARGB, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I444, 1, 1, ABGR, 4, 4, 1, 2, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, YUY2, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, UYVY, 2, 4, 1, 1, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, YUY2, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I422, 2, 1, UYVY, 2, 4, 1, 0, ARGB, 4)
TESTPLANARTOB(I420, 2, 2, I400, 1, 1, 1, 0, ARGB, 4)
TESTPLANARTOB(J420, 2, 2, J400, 1, 1, 1, 0, ARGB, 4)
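// Planar YUV plus a separate alpha plane (I420Alpha) to packed ARGB/ABGR. The
// trailing ATTEN flag, set only for the _Premult variant, requests alpha
// attenuation (premultiplication) during the conversion.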
#define TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, W1280, DIFF, N, NEG, OFF, ATTEN) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
align_buffer_page_end(src_u, kSizeUV + OFF); \
align_buffer_page_end(src_v, kSizeUV + OFF); \
align_buffer_page_end(src_a, kWidth * kHeight + OFF); \
align_buffer_page_end(dst_argb_c, kStrideB * kHeight + OFF); \
align_buffer_page_end(dst_argb_opt, kStrideB * kHeight + OFF); \
for (int i = 0; i < kWidth * kHeight; ++i) { \
src_y[i + OFF] = (fastrand() & 0xff); \
src_a[i + OFF] = (fastrand() & 0xff); \
} \
for (int i = 0; i < kSizeUV; ++i) { \
src_u[i + OFF] = (fastrand() & 0xff); \
src_v[i + OFF] = (fastrand() & 0xff); \
} \
memset(dst_argb_c + OFF, 1, kStrideB * kHeight); \
memset(dst_argb_opt + OFF, 101, kStrideB * kHeight); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
src_u + OFF, kStrideUV, \
src_v + OFF, kStrideUV, \
src_a + OFF, kWidth, \
dst_argb_c + OFF, kStrideB, \
kWidth, NEG kHeight, ATTEN); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
src_u + OFF, kStrideUV, \
src_v + OFF, kStrideUV, \
src_a + OFF, kWidth, \
dst_argb_opt + OFF, kStrideB, \
kWidth, NEG kHeight, ATTEN); \
} \
int max_diff = 0; \
for (int i = 0; i < kWidth * BPP_B * kHeight; ++i) { \
int abs_diff = \
abs(static_cast<int>(dst_argb_c[i + OFF]) - \
static_cast<int>(dst_argb_opt[i + OFF])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_page_end(src_y); \
free_aligned_buffer_page_end(src_u); \
free_aligned_buffer_page_end(src_v); \
free_aligned_buffer_page_end(src_a); \
free_aligned_buffer_page_end(dst_argb_c); \
free_aligned_buffer_page_end(dst_argb_opt); \
}
#define TESTQPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, DIFF) \
TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, 0) \
TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, 0) \
TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_, DIFF, _Invert, -, 0, 0) \
TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_, DIFF, _Opt, +, 0, 0) \
TESTQPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
YALIGN, benchmark_width_, DIFF, _Premult, +, 0, 1)
TESTQPLANARTOB(I420Alpha, 2, 2, ARGB, 4, 4, 1, 2)
TESTQPLANARTOB(I420Alpha, 2, 2, ABGR, 4, 4, 1, 2)
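// Biplanar NV12/NV21 to packed formats. As in TESTPLANARTOB, both outputs are
// expanded to ARGB before comparison so RGB565 results can be checked per byte.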
#define TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
W1280, DIFF, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
const int kStrideB = kWidth * BPP_B; \
const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
align_buffer_page_end(src_uv, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y) * 2 + OFF); \
align_buffer_page_end(dst_argb_c, kStrideB * kHeight); \
align_buffer_page_end(dst_argb_opt, kStrideB * kHeight); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kWidth; ++j) \
src_y[i * kWidth + j + OFF] = (fastrand() & 0xff); \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < kStrideUV * 2; ++j) { \
src_uv[i * kStrideUV * 2 + j + OFF] = (fastrand() & 0xff); \
} \
} \
memset(dst_argb_c, 1, kStrideB * kHeight); \
memset(dst_argb_opt, 101, kStrideB * kHeight); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
src_uv + OFF, kStrideUV * 2, \
dst_argb_c, kWidth * BPP_B, \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
src_uv + OFF, kStrideUV * 2, \
dst_argb_opt, kWidth * BPP_B, \
kWidth, NEG kHeight); \
} \
/* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
align_buffer_page_end(dst_argb32_c, kWidth * 4 * kHeight); \
align_buffer_page_end(dst_argb32_opt, kWidth * 4 * kHeight); \
memset(dst_argb32_c, 2, kWidth * 4 * kHeight); \
memset(dst_argb32_opt, 102, kWidth * 4 * kHeight); \
FMT_B##ToARGB(dst_argb_c, kStrideB, \
dst_argb32_c, kWidth * 4, \
kWidth, kHeight); \
FMT_B##ToARGB(dst_argb_opt, kStrideB, \
dst_argb32_opt, kWidth * 4, \
kWidth, kHeight); \
int max_diff = 0; \
for (int i = 0; i < kHeight; ++i) { \
for (int j = 0; j < kWidth * 4; ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_argb32_c[i * kWidth * 4 + j]) - \
static_cast<int>(dst_argb32_opt[i * kWidth * 4 + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_page_end(src_y); \
free_aligned_buffer_page_end(src_uv); \
free_aligned_buffer_page_end(dst_argb_c); \
free_aligned_buffer_page_end(dst_argb_opt); \
free_aligned_buffer_page_end(dst_argb32_c); \
free_aligned_buffer_page_end(dst_argb32_opt); \
}
#define TESTBIPLANARTOB(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, DIFF) \
TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
benchmark_width_ - 4, DIFF, _Any, +, 0) \
TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
benchmark_width_, DIFF, _Unaligned, +, 1) \
TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
benchmark_width_, DIFF, _Invert, -, 0) \
TESTBIPLANARTOBI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, \
benchmark_width_, DIFF, _Opt, +, 0)
TESTBIPLANARTOB(NV12, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV21, 2, 2, ARGB, 4, 2)
TESTBIPLANARTOB(NV12, 2, 2, RGB565, 2, 9)
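// Packed pixel formats to planar YUV. These use EXPECT_NEAR per pixel rather
// than a single max-diff accumulator, with the DIFF tolerance tuned per source
// format below.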
#define TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
W1280, DIFF, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
const int kStride = \
(kStrideUV * SUBSAMP_X * 8 * BPP_A + 7) / 8; \
align_buffer_page_end(src_argb, kStride * kHeight + OFF); \
align_buffer_page_end(dst_y_c, kWidth * kHeight); \
align_buffer_page_end(dst_u_c, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_v_c, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_y_opt, kWidth * kHeight); \
align_buffer_page_end(dst_u_opt, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_v_opt, \
kStrideUV * \
SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_y_c, 1, kWidth * kHeight); \
memset(dst_u_c, 2, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_v_c, 3, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_y_opt, 101, kWidth * kHeight); \
memset(dst_u_opt, 102, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_v_opt, 103, \
kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kStride; ++j) \
src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
dst_y_c, kWidth, \
dst_u_c, kStrideUV, \
dst_v_c, kStrideUV, \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
dst_y_opt, kWidth, \
dst_u_opt, kStrideUV, \
dst_v_opt, kStrideUV, \
kWidth, NEG kHeight); \
} \
for (int i = 0; i < kHeight; ++i) { \
for (int j = 0; j < kWidth; ++j) { \
EXPECT_NEAR(static_cast<int>(dst_y_c[i * kWidth + j]), \
static_cast<int>(dst_y_opt[i * kWidth + j]), DIFF); \
} \
} \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < kStrideUV; ++j) { \
EXPECT_NEAR(static_cast<int>(dst_u_c[i * kStrideUV + j]), \
static_cast<int>(dst_u_opt[i * kStrideUV + j]), DIFF); \
} \
} \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < kStrideUV; ++j) { \
EXPECT_NEAR(static_cast<int>(dst_v_c[i * \
kStrideUV + j]), \
static_cast<int>(dst_v_opt[i * \
kStrideUV + j]), DIFF); \
} \
} \
free_aligned_buffer_page_end(dst_y_c); \
free_aligned_buffer_page_end(dst_u_c); \
free_aligned_buffer_page_end(dst_v_c); \
free_aligned_buffer_page_end(dst_y_opt); \
free_aligned_buffer_page_end(dst_u_opt); \
free_aligned_buffer_page_end(dst_v_opt); \
free_aligned_buffer_page_end(src_argb); \
}
#define TESTATOPLANAR(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
DIFF) \
TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_ - 4, DIFF, _Any, +, 0) \
TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, DIFF, _Unaligned, +, 1) \
TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, DIFF, _Invert, -, 0) \
TESTATOPLANARI(FMT_A, BPP_A, YALIGN, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, DIFF, _Opt, +, 0)
TESTATOPLANAR(ARGB, 4, 1, I420, 2, 2, 4)
#if defined(__arm__) || defined(__aarch64__)
// arm version subsamples by summing 4 pixels then multiplying by matrix with
// 4x smaller coefficients which are rounded to nearest integer.
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 4)
TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 4)
#else
TESTATOPLANAR(ARGB, 4, 1, J420, 2, 2, 0)
TESTATOPLANAR(ARGB, 4, 1, J422, 2, 1, 0)
#endif
TESTATOPLANAR(BGRA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(ABGR, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGBA, 4, 1, I420, 2, 2, 4)
TESTATOPLANAR(RAW, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB24, 3, 1, I420, 2, 2, 4)
TESTATOPLANAR(RGB565, 2, 1, I420, 2, 2, 5)
// TODO(fbarchard): Make 1555 neon work same as C code, reduce to diff 9.
TESTATOPLANAR(ARGB1555, 2, 1, I420, 2, 2, 15)
TESTATOPLANAR(ARGB4444, 2, 1, I420, 2, 2, 17)
TESTATOPLANAR(ARGB, 4, 1, I411, 4, 1, 4)
TESTATOPLANAR(ARGB, 4, 1, I422, 2, 1, 2)
TESTATOPLANAR(ARGB, 4, 1, I444, 1, 1, 2)
TESTATOPLANAR(YUY2, 2, 1, I420, 2, 2, 2)
TESTATOPLANAR(UYVY, 2, 1, I420, 2, 2, 2)
TESTATOPLANAR(YUY2, 2, 1, I422, 2, 1, 2)
TESTATOPLANAR(UYVY, 2, 1, I422, 2, 1, 2)
TESTATOPLANAR(I400, 1, 1, I420, 2, 2, 2)
TESTATOPLANAR(J400, 1, 1, J420, 2, 2, 2)
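// Packed formats to biplanar NV12/NV21, compared with a tolerance of 4 on both
// the Y plane and the interleaved UV plane.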
#define TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, \
SUBSAMP_X, SUBSAMP_Y, W1280, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, FMT_A##To##FMT_PLANAR##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
const int kStride = SUBSAMPLE(kWidth, SUB_A) * BPP_A; \
const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
align_buffer_page_end(src_argb, kStride * kHeight + OFF); \
align_buffer_page_end(dst_y_c, kWidth * kHeight); \
align_buffer_page_end(dst_uv_c, \
kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
align_buffer_page_end(dst_y_opt, kWidth * kHeight); \
align_buffer_page_end(dst_uv_opt, \
kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
for (int i = 0; i < kHeight; ++i) \
for (int j = 0; j < kStride; ++j) \
src_argb[(i * kStride) + j + OFF] = (fastrand() & 0xff); \
memset(dst_y_c, 1, kWidth * kHeight); \
memset(dst_uv_c, 2, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
memset(dst_y_opt, 101, kWidth * kHeight); \
memset(dst_uv_opt, 102, kStrideUV * 2 * SUBSAMPLE(kHeight, SUBSAMP_Y)); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
dst_y_c, kWidth, dst_uv_c, kStrideUV * 2, \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
FMT_A##To##FMT_PLANAR(src_argb + OFF, kStride, \
dst_y_opt, kWidth, \
dst_uv_opt, kStrideUV * 2, kWidth, NEG kHeight); \
} \
int max_diff = 0; \
for (int i = 0; i < kHeight; ++i) { \
for (int j = 0; j < kWidth; ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_y_c[i * kWidth + j]) - \
static_cast<int>(dst_y_opt[i * kWidth + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 4); \
for (int i = 0; i < SUBSAMPLE(kHeight, SUBSAMP_Y); ++i) { \
for (int j = 0; j < kStrideUV * 2; ++j) { \
int abs_diff = \
abs(static_cast<int>(dst_uv_c[i * kStrideUV * 2 + j]) - \
static_cast<int>(dst_uv_opt[i * kStrideUV * 2 + j])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
} \
EXPECT_LE(max_diff, 4); \
free_aligned_buffer_page_end(dst_y_c); \
free_aligned_buffer_page_end(dst_uv_c); \
free_aligned_buffer_page_end(dst_y_opt); \
free_aligned_buffer_page_end(dst_uv_opt); \
free_aligned_buffer_page_end(src_argb); \
}
#define TESTATOBIPLANAR(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y) \
TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_ - 4, _Any, +, 0) \
TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Unaligned, +, 1) \
TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Invert, -, 0) \
TESTATOBIPLANARI(FMT_A, SUB_A, BPP_A, FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, \
benchmark_width_, _Opt, +, 0)
TESTATOBIPLANAR(ARGB, 1, 4, NV12, 2, 2)
TESTATOBIPLANAR(ARGB, 1, 4, NV21, 2, 2)
TESTATOBIPLANAR(YUY2, 2, 4, NV12, 2, 2)
TESTATOBIPLANAR(UYVY, 2, 4, NV12, 2, 2)
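// Packed to packed conversions. TESTATOB also adds a _Random variant that
// repeats the conversion on small random widths (1 to 64) and heights (1 to 32)
// to cover odd sizes and edge handling.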
#define TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
W1280, DIFF, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
align_buffer_page_end(src_argb, kStrideA * kHeightA + OFF); \
align_buffer_page_end(dst_argb_c, kStrideB * kHeightB); \
align_buffer_page_end(dst_argb_opt, kStrideB * kHeightB); \
for (int i = 0; i < kStrideA * kHeightA; ++i) { \
src_argb[i + OFF] = (fastrand() & 0xff); \
} \
memset(dst_argb_c, 1, kStrideB * kHeightB); \
memset(dst_argb_opt, 101, kStrideB * kHeightB); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_A##To##FMT_B(src_argb + OFF, kStrideA, \
dst_argb_c, kStrideB, \
kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
FMT_A##To##FMT_B(src_argb + OFF, kStrideA, \
dst_argb_opt, kStrideB, \
kWidth, NEG kHeight); \
} \
int max_diff = 0; \
for (int i = 0; i < kStrideB * kHeightB; ++i) { \
int abs_diff = \
abs(static_cast<int>(dst_argb_c[i]) - \
static_cast<int>(dst_argb_opt[i])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_page_end(src_argb); \
free_aligned_buffer_page_end(dst_argb_c); \
free_aligned_buffer_page_end(dst_argb_opt); \
}
#define TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##_Random) { \
for (int times = 0; times < benchmark_iterations_; ++times) { \
const int kWidth = (fastrand() & 63) + 1; \
const int kHeight = (fastrand() & 31) + 1; \
const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;\
const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B;\
align_buffer_page_end(src_argb, kStrideA * kHeightA); \
align_buffer_page_end(dst_argb_c, kStrideB * kHeightB); \
align_buffer_page_end(dst_argb_opt, kStrideB * kHeightB); \
for (int i = 0; i < kStrideA * kHeightA; ++i) { \
src_argb[i] = (fastrand() & 0xff); \
} \
memset(dst_argb_c, 123, kStrideB * kHeightB); \
memset(dst_argb_opt, 123, kStrideB * kHeightB); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_A##To##FMT_B(src_argb, kStrideA, \
dst_argb_c, kStrideB, \
kWidth, kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
FMT_A##To##FMT_B(src_argb, kStrideA, \
dst_argb_opt, kStrideB, \
kWidth, kHeight); \
int max_diff = 0; \
for (int i = 0; i < kStrideB * kHeightB; ++i) { \
int abs_diff = \
abs(static_cast<int>(dst_argb_c[i]) - \
static_cast<int>(dst_argb_opt[i])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_page_end(src_argb); \
free_aligned_buffer_page_end(dst_argb_c); \
free_aligned_buffer_page_end(dst_argb_opt); \
} \
}
#define TESTATOB(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_ - 4, DIFF, _Any, +, 0) \
TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_, DIFF, _Unaligned, +, 1) \
TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_, DIFF, _Invert, -, 0) \
TESTATOBI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_, DIFF, _Opt, +, 0) \
TESTATOBRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF)
TESTATOB(ARGB, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, BGRA, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ABGR, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGBA, 4, 4, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RAW, 3, 3, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGB24, 3, 3, 1, 0)
TESTATOB(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGB1555, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGB4444, 2, 2, 1, 0)
TESTATOB(ARGB, 4, 4, 1, YUY2, 2, 4, 1, 4)
TESTATOB(ARGB, 4, 4, 1, UYVY, 2, 4, 1, 4)
TESTATOB(ARGB, 4, 4, 1, I400, 1, 1, 1, 2)
TESTATOB(ARGB, 4, 4, 1, J400, 1, 1, 1, 2)
TESTATOB(BGRA, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ABGR, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGBA, 4, 4, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RAW, 3, 3, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RAW, 3, 3, 1, RGB24, 3, 3, 1, 0)
TESTATOB(RGB24, 3, 3, 1, ARGB, 4, 4, 1, 0)
TESTATOB(RGB565, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB1555, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(ARGB4444, 2, 2, 1, ARGB, 4, 4, 1, 0)
TESTATOB(YUY2, 2, 4, 1, ARGB, 4, 4, 1, 4)
TESTATOB(UYVY, 2, 4, 1, ARGB, 4, 4, 1, 4)
TESTATOB(I400, 1, 1, 1, ARGB, 4, 4, 1, 0)
TESTATOB(J400, 1, 1, 1, ARGB, 4, 4, 1, 0)
TESTATOB(I400, 1, 1, 1, I400, 1, 1, 1, 0)
TESTATOB(J400, 1, 1, 1, J400, 1, 1, 1, 0)
TESTATOB(I400, 1, 1, 1, I400Mirror, 1, 1, 1, 0)
TESTATOB(ARGB, 4, 4, 1, ARGBMirror, 4, 4, 1, 0)
#define TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
W1280, DIFF, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither##N) { \
const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
const int kHeight = benchmark_height_; \
const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B; \
align_buffer_page_end(src_argb, kStrideA * kHeightA + OFF); \
align_buffer_page_end(dst_argb_c, kStrideB * kHeightB); \
align_buffer_page_end(dst_argb_opt, kStrideB * kHeightB); \
for (int i = 0; i < kStrideA * kHeightA; ++i) { \
src_argb[i + OFF] = (fastrand() & 0xff); \
} \
memset(dst_argb_c, 1, kStrideB * kHeightB); \
memset(dst_argb_opt, 101, kStrideB * kHeightB); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, \
dst_argb_c, kStrideB, \
NULL, kWidth, NEG kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
for (int i = 0; i < benchmark_iterations_; ++i) { \
FMT_A##To##FMT_B##Dither(src_argb + OFF, kStrideA, \
dst_argb_opt, kStrideB, \
NULL, kWidth, NEG kHeight); \
} \
int max_diff = 0; \
for (int i = 0; i < kStrideB * kHeightB; ++i) { \
int abs_diff = \
abs(static_cast<int>(dst_argb_c[i]) - \
static_cast<int>(dst_argb_opt[i])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_page_end(src_argb); \
free_aligned_buffer_page_end(dst_argb_c); \
free_aligned_buffer_page_end(dst_argb_opt); \
}
#define TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
TEST_F(LibYUVConvertTest, FMT_A##To##FMT_B##Dither_Random) { \
for (int times = 0; times < benchmark_iterations_; ++times) { \
const int kWidth = (fastrand() & 63) + 1; \
const int kHeight = (fastrand() & 31) + 1; \
const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
const int kHeightB = (kHeight + HEIGHT_B - 1) / HEIGHT_B * HEIGHT_B; \
const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A;\
const int kStrideB = (kWidth * BPP_B + STRIDE_B - 1) / STRIDE_B * STRIDE_B;\
align_buffer_page_end(src_argb, kStrideA * kHeightA); \
align_buffer_page_end(dst_argb_c, kStrideB * kHeightB); \
align_buffer_page_end(dst_argb_opt, kStrideB * kHeightB); \
for (int i = 0; i < kStrideA * kHeightA; ++i) { \
src_argb[i] = (fastrand() & 0xff); \
} \
memset(dst_argb_c, 123, kStrideB * kHeightB); \
memset(dst_argb_opt, 123, kStrideB * kHeightB); \
MaskCpuFlags(disable_cpu_flags_); \
FMT_A##To##FMT_B##Dither(src_argb, kStrideA, \
dst_argb_c, kStrideB, \
NULL, kWidth, kHeight); \
MaskCpuFlags(benchmark_cpu_info_); \
FMT_A##To##FMT_B##Dither(src_argb, kStrideA, \
dst_argb_opt, kStrideB, \
NULL, kWidth, kHeight); \
int max_diff = 0; \
for (int i = 0; i < kStrideB * kHeightB; ++i) { \
int abs_diff = \
abs(static_cast<int>(dst_argb_c[i]) - \
static_cast<int>(dst_argb_opt[i])); \
if (abs_diff > max_diff) { \
max_diff = abs_diff; \
} \
} \
EXPECT_LE(max_diff, DIFF); \
free_aligned_buffer_page_end(src_argb); \
free_aligned_buffer_page_end(dst_argb_c); \
free_aligned_buffer_page_end(dst_argb_opt); \
} \
}
#define TESTATOBD(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF) \
TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_ - 4, DIFF, _Any, +, 0) \
TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_, DIFF, _Unaligned, +, 1) \
TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_, DIFF, _Invert, -, 0) \
TESTATOBDI(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, \
benchmark_width_, DIFF, _Opt, +, 0) \
TESTATOBDRANDOM(FMT_A, BPP_A, STRIDE_A, HEIGHT_A, \
FMT_B, BPP_B, STRIDE_B, HEIGHT_B, DIFF)
TESTATOBD(ARGB, 4, 4, 1, RGB565, 2, 2, 1, 0)
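// Symmetric conversions: applying the same channel swap twice (e.g. ARGBToBGRA
// then BGRA back through the same call) must return the original pixels, so the
// test converts, converts the result again in place, and expects an exact match
// with the source buffer.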
#define TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, \
                 W1280, N, NEG, OFF) \
TEST_F(LibYUVConvertTest, FMT_ATOB##_Symetric##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kHeightA = (kHeight + HEIGHT_A - 1) / HEIGHT_A * HEIGHT_A; \
  const int kStrideA = (kWidth * BPP_A + STRIDE_A - 1) / STRIDE_A * STRIDE_A; \
  align_buffer_page_end(src_argb, kStrideA * kHeightA + OFF); \
  align_buffer_page_end(dst_argb_c, kStrideA * kHeightA); \
  align_buffer_page_end(dst_argb_opt, kStrideA * kHeightA); \
  for (int i = 0; i < kStrideA * kHeightA; ++i) { \
    src_argb[i + OFF] = (fastrand() & 0xff); \
  } \
  memset(dst_argb_c, 1, kStrideA * kHeightA); \
  memset(dst_argb_opt, 101, kStrideA * kHeightA); \
  MaskCpuFlags(disable_cpu_flags_); \
  FMT_ATOB(src_argb + OFF, kStrideA, \
           dst_argb_c, kStrideA, \
           kWidth, NEG kHeight); \
  MaskCpuFlags(benchmark_cpu_info_); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_ATOB(src_argb + OFF, kStrideA, \
             dst_argb_opt, kStrideA, \
             kWidth, NEG kHeight); \
  } \
  MaskCpuFlags(disable_cpu_flags_); \
  FMT_ATOB(dst_argb_c, kStrideA, \
           dst_argb_c, kStrideA, \
           kWidth, NEG kHeight); \
  MaskCpuFlags(benchmark_cpu_info_); \
  FMT_ATOB(dst_argb_opt, kStrideA, \
           dst_argb_opt, kStrideA, \
           kWidth, NEG kHeight); \
  for (int i = 0; i < kStrideA * kHeightA; ++i) { \
    EXPECT_EQ(src_argb[i + OFF], dst_argb_opt[i]); \
    EXPECT_EQ(dst_argb_c[i], dst_argb_opt[i]); \
  } \
  free_aligned_buffer_page_end(src_argb); \
  free_aligned_buffer_page_end(dst_argb_c); \
  free_aligned_buffer_page_end(dst_argb_opt); \
}
#define TESTSYM(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A) \
  TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, \
           benchmark_width_ - 4, _Any, +, 0) \
  TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, \
           benchmark_width_, _Unaligned, +, 1) \
  TESTSYMI(FMT_ATOB, BPP_A, STRIDE_A, HEIGHT_A, \
           benchmark_width_, _Opt, +, 0)
TESTSYM(ARGBToARGB, 4, 4, 1)
TESTSYM(ARGBToBGRA, 4, 4, 1)
TESTSYM(ARGBToABGR, 4, 4, 1)
TESTSYM(BGRAToARGB, 4, 4, 1)
TESTSYM(ABGRToARGB, 4, 4, 1)
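// Converts a 256-entry ramp (every channel equals the pixel index) from ARGB
// to RGB565 and checks the packed output against a known HashDjb2 checksum.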
TEST_F(LibYUVConvertTest, Test565) {
  SIMD_ALIGNED(uint8 orig_pixels[256][4]);
  SIMD_ALIGNED(uint8 pixels565[256][2]);
  for (int i = 0; i < 256; ++i) {
    for (int j = 0; j < 4; ++j) {
      orig_pixels[i][j] = i;
    }
  }
  ARGBToRGB565(&orig_pixels[0][0], 0, &pixels565[0][0], 0, 256, 1);
  uint32 checksum = HashDjb2(&pixels565[0][0], sizeof(pixels565), 5381);
  EXPECT_EQ(610919429u, checksum);
}
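// The JPEG tests below exercise ValidateJpeg and the MJPG decode entry points
// on synthetic buffers that contain only SOI/EOI markers, so they cover
// marker handling and error paths rather than real decoding.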
#ifdef HAVE_JPEG
TEST_F(LibYUVConvertTest, ValidateJpeg) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_page_end(orig_pixels, kSize);
  // No SOI or EOI. Expect fail.
  memset(orig_pixels, 0, kSize);
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
  // Test special value that matches marker start.
  memset(orig_pixels, 0xff, kSize);
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
  // EOI, SOI. Expect pass.
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    EXPECT_TRUE(ValidateJpeg(orig_pixels, kSize));
  }
  free_aligned_buffer_page_end(orig_pixels);
}
TEST_F(LibYUVConvertTest, ValidateJpegLarge) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  const int kMultiple = 10;
  const int kBufSize = kImageSize * kMultiple + kOff;
  align_buffer_page_end(orig_pixels, kBufSize);
  // No SOI or EOI. Expect fail.
  memset(orig_pixels, 0, kBufSize);
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kBufSize));
  // EOI, SOI. Expect pass.
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    EXPECT_TRUE(ValidateJpeg(orig_pixels, kBufSize));
  }
  free_aligned_buffer_page_end(orig_pixels);
}
TEST_F(LibYUVConvertTest, InvalidateJpeg) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_page_end(orig_pixels, kSize);
  // NULL pointer. Expect fail.
  EXPECT_FALSE(ValidateJpeg(NULL, kSize));
  // Negative size. Expect fail.
  EXPECT_FALSE(ValidateJpeg(orig_pixels, -1));
  // Too large size. Expect fail.
  EXPECT_FALSE(ValidateJpeg(orig_pixels, 0xfb000000ull));
  // No SOI or EOI. Expect fail.
  memset(orig_pixels, 0, kSize);
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
  // SOI but no EOI. Expect fail.
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
  }
  // EOI but no SOI. Expect fail.
  orig_pixels[0] = 0;
  orig_pixels[1] = 0;
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  EXPECT_FALSE(ValidateJpeg(orig_pixels, kSize));
  free_aligned_buffer_page_end(orig_pixels);
}
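// Exercises ValidateJpeg on small random buffers with a forced SOI marker.
// No result is asserted; this mainly checks that marker scanning copes with
// arbitrary data (validation normally fails on such input).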
TEST_F(LibYUVConvertTest, FuzzJpeg) {
  // SOI but no EOI. Expect fail.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    const int kSize = fastrand() % 5000 + 2;
    align_buffer_page_end(orig_pixels, kSize);
    MemRandomize(orig_pixels, kSize);
    // Add SOI so frame will be scanned.
    orig_pixels[0] = 0xff;
    orig_pixels[1] = 0xd8;  // SOI.
    orig_pixels[kSize - 1] = 0xff;
    ValidateJpeg(orig_pixels, kSize);  // Failure normally expected.
    free_aligned_buffer_page_end(orig_pixels);
  }
}
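// MJPGToI420 and MJPGToARGB are called on a buffer that carries only SOI and
// EOI markers, so the decoder is expected to report failure (return 1).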
TEST_F(LibYUVConvertTest, MJPGToI420) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_page_end(orig_pixels, kSize);
  align_buffer_page_end(dst_y_opt, benchmark_width_ * benchmark_height_);
  align_buffer_page_end(dst_u_opt,
                        SUBSAMPLE(benchmark_width_, 2) *
                        SUBSAMPLE(benchmark_height_, 2));
  align_buffer_page_end(dst_v_opt,
                        SUBSAMPLE(benchmark_width_, 2) *
                        SUBSAMPLE(benchmark_height_, 2));
  // EOI, SOI to make MJPG appear valid.
  memset(orig_pixels, 0, kSize);
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    int ret = MJPGToI420(orig_pixels, kSize,
                         dst_y_opt, benchmark_width_,
                         dst_u_opt, SUBSAMPLE(benchmark_width_, 2),
                         dst_v_opt, SUBSAMPLE(benchmark_width_, 2),
                         benchmark_width_, benchmark_height_,
                         benchmark_width_, benchmark_height_);
    // Expect failure because image is not really valid.
    EXPECT_EQ(1, ret);
  }
  free_aligned_buffer_page_end(dst_y_opt);
  free_aligned_buffer_page_end(dst_u_opt);
  free_aligned_buffer_page_end(dst_v_opt);
  free_aligned_buffer_page_end(orig_pixels);
}
TEST_F(LibYUVConvertTest, MJPGToARGB) {
  const int kOff = 10;
  const int kMinJpeg = 64;
  const int kImageSize = benchmark_width_ * benchmark_height_ >= kMinJpeg ?
      benchmark_width_ * benchmark_height_ : kMinJpeg;
  const int kSize = kImageSize + kOff;
  align_buffer_page_end(orig_pixels, kSize);
  align_buffer_page_end(dst_argb_opt, benchmark_width_ * benchmark_height_ * 4);
  // EOI, SOI to make MJPG appear valid.
  memset(orig_pixels, 0, kSize);
  orig_pixels[0] = 0xff;
  orig_pixels[1] = 0xd8;  // SOI.
  orig_pixels[kSize - kOff + 0] = 0xff;
  orig_pixels[kSize - kOff + 1] = 0xd9;  // EOI.
  for (int times = 0; times < benchmark_iterations_; ++times) {
    int ret = MJPGToARGB(orig_pixels, kSize,
                         dst_argb_opt, benchmark_width_ * 4,
                         benchmark_width_, benchmark_height_,
                         benchmark_width_, benchmark_height_);
    // Expect failure because image is not really valid.
    EXPECT_EQ(1, ret);
  }
  free_aligned_buffer_page_end(dst_argb_opt);
  free_aligned_buffer_page_end(orig_pixels);
}
#endif  // HAVE_JPEG
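// Verifies that ConvertToI420 with a vertical crop on an NV12 source produces
// the same planes as calling NV12ToI420 directly on the cropped region.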
TEST_F(LibYUVConvertTest, NV12Crop) {
  const int SUBSAMP_X = 2;
  const int SUBSAMP_Y = 2;
  const int kWidth = benchmark_width_;
  const int kHeight = benchmark_height_;
  const int crop_y =
      ((benchmark_height_ - (benchmark_height_ * 360 / 480)) / 2 + 1) & ~1;
  const int kDestWidth = benchmark_width_;
  const int kDestHeight = benchmark_height_ - crop_y * 2;
  const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X);
  const int sample_size = kWidth * kHeight +
                          kStrideUV *
                          SUBSAMPLE(kHeight, SUBSAMP_Y) * 2;
  align_buffer_page_end(src_y, sample_size);
  uint8* src_uv = src_y + kWidth * kHeight;
  align_buffer_page_end(dst_y, kDestWidth * kDestHeight);
  align_buffer_page_end(dst_u,
                        SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                        SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  align_buffer_page_end(dst_v,
                        SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                        SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  align_buffer_page_end(dst_y_2, kDestWidth * kDestHeight);
  align_buffer_page_end(dst_u_2,
                        SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                        SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  align_buffer_page_end(dst_v_2,
                        SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                        SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  for (int i = 0; i < kHeight * kWidth; ++i) {
    src_y[i] = (fastrand() & 0xff);
  }
  for (int i = 0; i < (SUBSAMPLE(kHeight, SUBSAMP_Y) *
                       kStrideUV) * 2; ++i) {
    src_uv[i] = (fastrand() & 0xff);
  }
  memset(dst_y, 1, kDestWidth * kDestHeight);
  memset(dst_u, 2, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                   SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  memset(dst_v, 3, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                   SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  memset(dst_y_2, 1, kDestWidth * kDestHeight);
  memset(dst_u_2, 2, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                     SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  memset(dst_v_2, 3, SUBSAMPLE(kDestWidth, SUBSAMP_X) *
                     SUBSAMPLE(kDestHeight, SUBSAMP_Y));
  ConvertToI420(src_y, sample_size,
                dst_y_2, kDestWidth,
                dst_u_2, SUBSAMPLE(kDestWidth, SUBSAMP_X),
                dst_v_2, SUBSAMPLE(kDestWidth, SUBSAMP_X),
                0, crop_y,
                kWidth, kHeight,
                kDestWidth, kDestHeight,
                libyuv::kRotate0, libyuv::FOURCC_NV12);
  NV12ToI420(src_y + crop_y * kWidth, kWidth,
             src_uv + (crop_y / 2) * kStrideUV * 2,
             kStrideUV * 2,
             dst_y, kDestWidth,
             dst_u, SUBSAMPLE(kDestWidth, SUBSAMP_X),
             dst_v, SUBSAMPLE(kDestWidth, SUBSAMP_X),
             kDestWidth, kDestHeight);
  for (int i = 0; i < kDestHeight; ++i) {
    for (int j = 0; j < kDestWidth; ++j) {
      EXPECT_EQ(dst_y[i * kWidth + j], dst_y_2[i * kWidth + j]);
    }
  }
  for (int i = 0; i < SUBSAMPLE(kDestHeight, SUBSAMP_Y); ++i) {
    for (int j = 0; j < SUBSAMPLE(kDestWidth, SUBSAMP_X); ++j) {
      EXPECT_EQ(dst_u[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j],
                dst_u_2[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j]);
    }
  }
  for (int i = 0; i < SUBSAMPLE(kDestHeight, SUBSAMP_Y); ++i) {
    for (int j = 0; j < SUBSAMPLE(kDestWidth, SUBSAMP_X); ++j) {
      EXPECT_EQ(dst_v[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j],
                dst_v_2[i * SUBSAMPLE(kDestWidth, SUBSAMP_X) + j]);
    }
  }
  free_aligned_buffer_page_end(dst_y);
  free_aligned_buffer_page_end(dst_u);
  free_aligned_buffer_page_end(dst_v);
  free_aligned_buffer_page_end(dst_y_2);
  free_aligned_buffer_page_end(dst_u_2);
  free_aligned_buffer_page_end(dst_v_2);
  free_aligned_buffer_page_end(src_y);
}
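// Checks YToARGB for a short ramp of Y values: each output grey level should
// match the expected expansion (y - 16) * 1.164 computed into expectedg. The
// first loop also prints the converted pixels for manual inspection.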
TEST_F(LibYUVConvertTest, TestYToARGB) {
  uint8 y[32];
  uint8 expectedg[32];
  for (int i = 0; i < 32; ++i) {
    y[i] = i * 5 + 17;
    expectedg[i] = static_cast<int>((y[i] - 16) * 1.164f + 0.5f);
  }
  uint8 argb[32 * 4];
  YToARGB(y, 0, argb, 0, 32, 1);
  for (int i = 0; i < 32; ++i) {
    printf("%2d %d: %d <-> %d,%d,%d,%d\n", i, y[i], expectedg[i],
           argb[i * 4 + 0],
           argb[i * 4 + 1],
           argb[i * 4 + 2],
           argb[i * 4 + 3]);
  }
  for (int i = 0; i < 32; ++i) {
    EXPECT_EQ(expectedg[i], argb[i * 4 + 0]);
  }
}
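// An all-zero dither table: ARGBToRGB565Dither with this table should produce
// exactly the same output as plain ARGBToRGB565, which TestNoDither verifies.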
static const uint8 kNoDither4x4[16] = {
  0, 0, 0, 0,
  0, 0, 0, 0,
  0, 0, 0, 0,
  0, 0, 0, 0,
};
TEST_F(LibYUVConvertTest, TestNoDither) {
  align_buffer_page_end(src_argb, benchmark_width_ * benchmark_height_ * 4);
  align_buffer_page_end(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
  align_buffer_page_end(dst_rgb565dither,
                        benchmark_width_ * benchmark_height_ * 2);
  MemRandomize(src_argb, benchmark_width_ * benchmark_height_ * 4);
  MemRandomize(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
  MemRandomize(dst_rgb565dither, benchmark_width_ * benchmark_height_ * 2);
  ARGBToRGB565(src_argb, benchmark_width_ * 4,
               dst_rgb565, benchmark_width_ * 2,
               benchmark_width_, benchmark_height_);
  ARGBToRGB565Dither(src_argb, benchmark_width_ * 4,
                     dst_rgb565dither, benchmark_width_ * 2,
                     kNoDither4x4, benchmark_width_, benchmark_height_);
  for (int i = 0; i < benchmark_width_ * benchmark_height_ * 2; ++i) {
    EXPECT_EQ(dst_rgb565[i], dst_rgb565dither[i]);
  }
  free_aligned_buffer_page_end(src_argb);
  free_aligned_buffer_page_end(dst_rgb565);
  free_aligned_buffer_page_end(dst_rgb565dither);
}
// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
static const uint8 kDither565_4x4[16] = {
  0, 4, 1, 5,
  6, 2, 7, 3,
  1, 5, 0, 4,
  7, 3, 6, 2,
};
TEST_F(LibYUVConvertTest, TestDither) {
  align_buffer_page_end(src_argb, benchmark_width_ * benchmark_height_ * 4);
  align_buffer_page_end(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
  align_buffer_page_end(dst_rgb565dither,
                        benchmark_width_ * benchmark_height_ * 2);
  align_buffer_page_end(dst_argb, benchmark_width_ * benchmark_height_ * 4);
  align_buffer_page_end(dst_argbdither,
                        benchmark_width_ * benchmark_height_ * 4);
  MemRandomize(src_argb, benchmark_width_ * benchmark_height_ * 4);
  MemRandomize(dst_rgb565, benchmark_width_ * benchmark_height_ * 2);
  MemRandomize(dst_rgb565dither, benchmark_width_ * benchmark_height_ * 2);
  MemRandomize(dst_argb, benchmark_width_ * benchmark_height_ * 4);
  MemRandomize(dst_argbdither, benchmark_width_ * benchmark_height_ * 4);
  ARGBToRGB565(src_argb, benchmark_width_ * 4,
               dst_rgb565, benchmark_width_ * 2,
               benchmark_width_, benchmark_height_);
  ARGBToRGB565Dither(src_argb, benchmark_width_ * 4,
                     dst_rgb565dither, benchmark_width_ * 2,
                     kDither565_4x4, benchmark_width_, benchmark_height_);
  RGB565ToARGB(dst_rgb565, benchmark_width_ * 2,
               dst_argb, benchmark_width_ * 4,
               benchmark_width_, benchmark_height_);
  RGB565ToARGB(dst_rgb565dither, benchmark_width_ * 2,
               dst_argbdither, benchmark_width_ * 4,
               benchmark_width_, benchmark_height_);
  for (int i = 0; i < benchmark_width_ * benchmark_height_ * 4; ++i) {
    EXPECT_NEAR(dst_argb[i], dst_argbdither[i], 9);
  }
  free_aligned_buffer_page_end(src_argb);
  free_aligned_buffer_page_end(dst_rgb565);
  free_aligned_buffer_page_end(dst_rgb565dither);
  free_aligned_buffer_page_end(dst_argb);
  free_aligned_buffer_page_end(dst_argbdither);
}
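// TESTPLANARTOBID converts planar YUV to FMT_B with dithering on both the C
// and optimized paths, then expands both results to FMT_C (ARGB) so the
// packed 565 output can be compared byte by byte within DIFF.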
#define TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                        YALIGN, W1280, DIFF, N, NEG, OFF, FMT_C, BPP_C) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##Dither##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = ALIGNINT(benchmark_height_, YALIGN); \
  const int kStrideB = ALIGNINT(kWidth * BPP_B, ALIGN); \
  const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
  const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
  align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
  align_buffer_page_end(src_u, kSizeUV + OFF); \
  align_buffer_page_end(src_v, kSizeUV + OFF); \
  align_buffer_page_end(dst_argb_c, kStrideB * kHeight + OFF); \
  align_buffer_page_end(dst_argb_opt, kStrideB * kHeight + OFF); \
  for (int i = 0; i < kWidth * kHeight; ++i) { \
    src_y[i + OFF] = (fastrand() & 0xff); \
  } \
  for (int i = 0; i < kSizeUV; ++i) { \
    src_u[i + OFF] = (fastrand() & 0xff); \
    src_v[i + OFF] = (fastrand() & 0xff); \
  } \
  memset(dst_argb_c + OFF, 1, kStrideB * kHeight); \
  memset(dst_argb_opt + OFF, 101, kStrideB * kHeight); \
  MaskCpuFlags(disable_cpu_flags_); \
  FMT_PLANAR##To##FMT_B##Dither(src_y + OFF, kWidth, \
                                src_u + OFF, kStrideUV, \
                                src_v + OFF, kStrideUV, \
                                dst_argb_c + OFF, kStrideB, \
                                NULL, kWidth, NEG kHeight); \
  MaskCpuFlags(benchmark_cpu_info_); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_PLANAR##To##FMT_B##Dither(src_y + OFF, kWidth, \
                                  src_u + OFF, kStrideUV, \
                                  src_v + OFF, kStrideUV, \
                                  dst_argb_opt + OFF, kStrideB, \
                                  NULL, kWidth, NEG kHeight); \
  } \
  int max_diff = 0; \
  /* Convert to ARGB so 565 is expanded to bytes that can be compared. */ \
  align_buffer_page_end(dst_argb32_c, kWidth * BPP_C * kHeight); \
  align_buffer_page_end(dst_argb32_opt, kWidth * BPP_C * kHeight); \
  memset(dst_argb32_c, 2, kWidth * BPP_C * kHeight); \
  memset(dst_argb32_opt, 102, kWidth * BPP_C * kHeight); \
  FMT_B##To##FMT_C(dst_argb_c + OFF, kStrideB, \
                   dst_argb32_c, kWidth * BPP_C, \
                   kWidth, kHeight); \
  FMT_B##To##FMT_C(dst_argb_opt + OFF, kStrideB, \
                   dst_argb32_opt, kWidth * BPP_C, \
                   kWidth, kHeight); \
  for (int i = 0; i < kWidth * BPP_C * kHeight; ++i) { \
    int abs_diff = \
        abs(static_cast<int>(dst_argb32_c[i]) - \
            static_cast<int>(dst_argb32_opt[i])); \
    if (abs_diff > max_diff) { \
      max_diff = abs_diff; \
    } \
  } \
  EXPECT_LE(max_diff, DIFF); \
  free_aligned_buffer_page_end(src_y); \
  free_aligned_buffer_page_end(src_u); \
  free_aligned_buffer_page_end(src_v); \
  free_aligned_buffer_page_end(dst_argb_c); \
  free_aligned_buffer_page_end(dst_argb_opt); \
  free_aligned_buffer_page_end(dst_argb32_c); \
  free_aligned_buffer_page_end(dst_argb32_opt); \
}
#define TESTPLANARTOBD(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                       YALIGN, DIFF, FMT_C, BPP_C) \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                  YALIGN, benchmark_width_ - 4, DIFF, _Any, +, 0, FMT_C, BPP_C) \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                  YALIGN, benchmark_width_, DIFF, _Unaligned, +, 1, FMT_C, BPP_C) \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                  YALIGN, benchmark_width_, DIFF, _Invert, -, 0, FMT_C, BPP_C) \
  TESTPLANARTOBID(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, BPP_B, ALIGN, \
                  YALIGN, benchmark_width_, DIFF, _Opt, +, 0, FMT_C, BPP_C)
TESTPLANARTOBD(I420, 2, 2, RGB565, 2, 2, 1, 9, ARGB, 4)
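// TESTPTOB checks a packed-to-NV12 conversion (e.g. UYVYToNV12) against a
// two-step reference: packed to I420, then I420 to NV12. The Y and UV planes
// from both paths must match exactly.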
#define TESTPTOB(NAME, UYVYTOI420, UYVYTONV12) \
TEST_F(LibYUVConvertTest, NAME) { \
  const int kWidth = benchmark_width_; \
  const int kHeight = benchmark_height_; \
  \
  align_buffer_page_end(orig_uyvy, \
                        4 * SUBSAMPLE(kWidth, 2) * kHeight); \
  align_buffer_page_end(orig_y, kWidth * kHeight); \
  align_buffer_page_end(orig_u, \
                        SUBSAMPLE(kWidth, 2) * \
                        SUBSAMPLE(kHeight, 2)); \
  align_buffer_page_end(orig_v, \
                        SUBSAMPLE(kWidth, 2) * \
                        SUBSAMPLE(kHeight, 2)); \
  \
  align_buffer_page_end(dst_y_orig, kWidth * kHeight); \
  align_buffer_page_end(dst_uv_orig, 2 * \
                        SUBSAMPLE(kWidth, 2) * \
                        SUBSAMPLE(kHeight, 2)); \
  \
  align_buffer_page_end(dst_y, kWidth * kHeight); \
  align_buffer_page_end(dst_uv, 2 * \
                        SUBSAMPLE(kWidth, 2) * \
                        SUBSAMPLE(kHeight, 2)); \
  \
  MemRandomize(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2) * kHeight); \
  \
  /* Convert UYVY to NV12 in 2 steps for reference */ \
  libyuv::UYVYTOI420(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2), \
                     orig_y, kWidth, \
                     orig_u, SUBSAMPLE(kWidth, 2), \
                     orig_v, SUBSAMPLE(kWidth, 2), \
                     kWidth, kHeight); \
  libyuv::I420ToNV12(orig_y, kWidth, \
                     orig_u, SUBSAMPLE(kWidth, 2), \
                     orig_v, SUBSAMPLE(kWidth, 2), \
                     dst_y_orig, kWidth, \
                     dst_uv_orig, 2 * SUBSAMPLE(kWidth, 2), \
                     kWidth, kHeight); \
  \
  /* Convert to NV12 */ \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    libyuv::UYVYTONV12(orig_uyvy, 4 * SUBSAMPLE(kWidth, 2), \
                       dst_y, kWidth, \
                       dst_uv, 2 * SUBSAMPLE(kWidth, 2), \
                       kWidth, kHeight); \
  } \
  \
  for (int i = 0; i < kWidth * kHeight; ++i) { \
    EXPECT_EQ(orig_y[i], dst_y[i]); \
  } \
  for (int i = 0; i < kWidth * kHeight; ++i) { \
    EXPECT_EQ(dst_y_orig[i], dst_y[i]); \
  } \
  for (int i = 0; i < 2 * SUBSAMPLE(kWidth, 2) * SUBSAMPLE(kHeight, 2); ++i) { \
    EXPECT_EQ(dst_uv_orig[i], dst_uv[i]); \
  } \
  \
  free_aligned_buffer_page_end(orig_uyvy); \
  free_aligned_buffer_page_end(orig_y); \
  free_aligned_buffer_page_end(orig_u); \
  free_aligned_buffer_page_end(orig_v); \
  free_aligned_buffer_page_end(dst_y_orig); \
  free_aligned_buffer_page_end(dst_uv_orig); \
  free_aligned_buffer_page_end(dst_y); \
  free_aligned_buffer_page_end(dst_uv); \
}
TESTPTOB(TestYUY2ToNV12, YUY2ToI420, YUY2ToNV12)
TESTPTOB(TestUYVYToNV12, UYVYToI420, UYVYToNV12)
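// TESTPLANARTOEI converts planar YUV to FMT_B, then compares two routes to a
// third format FMT_C: planar converted to C directly in one step versus B
// converted to C. The one-step and two-step results must match exactly.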
#define TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                       W1280, N, NEG, OFF, FMT_C, BPP_C) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B; \
  const int kStrideUV = SUBSAMPLE(kWidth, SUBSAMP_X); \
  const int kSizeUV = kStrideUV * SUBSAMPLE(kHeight, SUBSAMP_Y); \
  align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
  align_buffer_page_end(src_u, kSizeUV + OFF); \
  align_buffer_page_end(src_v, kSizeUV + OFF); \
  align_buffer_page_end(dst_argb_b, kStrideB * kHeight + OFF); \
  for (int i = 0; i < kWidth * kHeight; ++i) { \
    src_y[i + OFF] = (fastrand() & 0xff); \
  } \
  for (int i = 0; i < kSizeUV; ++i) { \
    src_u[i + OFF] = (fastrand() & 0xff); \
    src_v[i + OFF] = (fastrand() & 0xff); \
  } \
  memset(dst_argb_b + OFF, 1, kStrideB * kHeight); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                          src_u + OFF, kStrideUV, \
                          src_v + OFF, kStrideUV, \
                          dst_argb_b + OFF, kStrideB, \
                          kWidth, NEG kHeight); \
  } \
  /* Convert to a 3rd format in 1 step and 2 steps and compare */ \
  const int kStrideC = kWidth * BPP_C; \
  align_buffer_page_end(dst_argb_c, kStrideC * kHeight + OFF); \
  align_buffer_page_end(dst_argb_bc, kStrideC * kHeight + OFF); \
  memset(dst_argb_c + OFF, 2, kStrideC * kHeight); \
  memset(dst_argb_bc + OFF, 3, kStrideC * kHeight); \
  FMT_PLANAR##To##FMT_C(src_y + OFF, kWidth, \
                        src_u + OFF, kStrideUV, \
                        src_v + OFF, kStrideUV, \
                        dst_argb_c + OFF, kStrideC, \
                        kWidth, NEG kHeight); \
  /* Convert B to C */ \
  FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, \
                   dst_argb_bc + OFF, kStrideC, \
                   kWidth, kHeight); \
  for (int i = 0; i < kStrideC * kHeight; ++i) { \
    EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]); \
  } \
  free_aligned_buffer_page_end(src_y); \
  free_aligned_buffer_page_end(src_u); \
  free_aligned_buffer_page_end(src_v); \
  free_aligned_buffer_page_end(dst_argb_b); \
  free_aligned_buffer_page_end(dst_argb_c); \
  free_aligned_buffer_page_end(dst_argb_bc); \
}
#define TESTPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                      FMT_C, BPP_C) \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                 benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C) \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                 benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C) \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                 benchmark_width_, _Invert, -, 0, FMT_C, BPP_C) \
  TESTPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                 benchmark_width_, _Opt, +, 0, FMT_C, BPP_C)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTPLANARTOE(J420, 2, 2, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(H420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, RGB24, 3)
TESTPLANARTOE(I420, 2, 2, RGB24, 1, 3, RAW, 3)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RAW, 3)
TESTPLANARTOE(I420, 2, 2, RAW, 1, 3, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, RGB565, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB1555, 2)
TESTPLANARTOE(I420, 2, 2, ARGB, 1, 4, ARGB4444, 2)
TESTPLANARTOE(I422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(H422, 2, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(H422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, BGRA, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, RGBA, 1, 4, ARGB, 4)
TESTPLANARTOE(I411, 4, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(I444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(J444, 1, 1, ARGB, 1, 4, ARGB, 4)
TESTPLANARTOE(I444, 1, 1, ABGR, 1, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I420, 2, 2, UYVY, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, YUY2, 2, 4, ARGB, 4)
TESTPLANARTOE(I422, 2, 1, UYVY, 2, 4, ARGB, 4)
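// TESTQPLANARTOEI is the same one-step vs. two-step check for the four-plane
// (YUV plus alpha) converters such as I420AlphaToARGB, with an extra ATTEN
// argument used by the premultiplied-alpha (_Premult) variant.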
#define TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                        W1280, N, NEG, OFF, FMT_C, BPP_C, ATTEN) \
TEST_F(LibYUVConvertTest, FMT_PLANAR##To##FMT_B##_##FMT_C##N) { \
  const int kWidth = ((W1280) > 0) ? (W1280) : 1; \
  const int kHeight = benchmark_height_; \
  const int kStrideB = SUBSAMPLE(kWidth, SUB_B) * BPP_B; \
  const int kSizeUV = \
      SUBSAMPLE(kWidth, SUBSAMP_X) * SUBSAMPLE(kHeight, SUBSAMP_Y); \
  align_buffer_page_end(src_y, kWidth * kHeight + OFF); \
  align_buffer_page_end(src_u, kSizeUV + OFF); \
  align_buffer_page_end(src_v, kSizeUV + OFF); \
  align_buffer_page_end(src_a, kWidth * kHeight + OFF); \
  align_buffer_page_end(dst_argb_b, kStrideB * kHeight + OFF); \
  for (int i = 0; i < kWidth * kHeight; ++i) { \
    src_y[i + OFF] = (fastrand() & 0xff); \
    src_a[i + OFF] = (fastrand() & 0xff); \
  } \
  for (int i = 0; i < kSizeUV; ++i) { \
    src_u[i + OFF] = (fastrand() & 0xff); \
    src_v[i + OFF] = (fastrand() & 0xff); \
  } \
  memset(dst_argb_b + OFF, 1, kStrideB * kHeight); \
  for (int i = 0; i < benchmark_iterations_; ++i) { \
    FMT_PLANAR##To##FMT_B(src_y + OFF, kWidth, \
                          src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                          src_a + OFF, kWidth, \
                          dst_argb_b + OFF, kStrideB, \
                          kWidth, NEG kHeight, ATTEN); \
  } \
  int max_diff = 0; \
  /* Convert to a 3rd format in 1 step and 2 steps and compare */ \
  const int kStrideC = kWidth * BPP_C; \
  align_buffer_page_end(dst_argb_c, kStrideC * kHeight + OFF); \
  align_buffer_page_end(dst_argb_bc, kStrideC * kHeight + OFF); \
  memset(dst_argb_c + OFF, 2, kStrideC * kHeight); \
  memset(dst_argb_bc + OFF, 3, kStrideC * kHeight); \
  FMT_PLANAR##To##FMT_C(src_y + OFF, kWidth, \
                        src_u + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        src_v + OFF, SUBSAMPLE(kWidth, SUBSAMP_X), \
                        src_a + OFF, kWidth, \
                        dst_argb_c + OFF, kStrideC, \
                        kWidth, NEG kHeight, ATTEN); \
  /* Convert B to C */ \
  FMT_B##To##FMT_C(dst_argb_b + OFF, kStrideB, \
                   dst_argb_bc + OFF, kStrideC, \
                   kWidth, kHeight); \
  for (int i = 0; i < kStrideC * kHeight; ++i) { \
    EXPECT_EQ(dst_argb_c[i + OFF], dst_argb_bc[i + OFF]); \
  } \
  free_aligned_buffer_page_end(src_y); \
  free_aligned_buffer_page_end(src_u); \
  free_aligned_buffer_page_end(src_v); \
  free_aligned_buffer_page_end(src_a); \
  free_aligned_buffer_page_end(dst_argb_b); \
  free_aligned_buffer_page_end(dst_argb_c); \
  free_aligned_buffer_page_end(dst_argb_bc); \
}
#define TESTQPLANARTOE(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                       FMT_C, BPP_C) \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                  benchmark_width_ - 4, _Any, +, 0, FMT_C, BPP_C, 0) \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                  benchmark_width_, _Unaligned, +, 1, FMT_C, BPP_C, 0) \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                  benchmark_width_, _Invert, -, 0, FMT_C, BPP_C, 0) \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                  benchmark_width_, _Opt, +, 0, FMT_C, BPP_C, 0) \
  TESTQPLANARTOEI(FMT_PLANAR, SUBSAMP_X, SUBSAMP_Y, FMT_B, SUB_B, BPP_B, \
                  benchmark_width_, _Premult, +, 0, FMT_C, BPP_C, 1)
TESTQPLANARTOE(I420Alpha, 2, 2, ARGB, 1, 4, ABGR, 4)
TESTQPLANARTOE(I420Alpha, 2, 2, ABGR, 1, 4, ARGB, 4)
}  // namespace libyuv