convert_from_argb.cc

/*
 * Copyright 2012 The LibYuv Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "libyuv/convert_from_argb.h"

#include "libyuv/basic_types.h"
#include "libyuv/cpu_id.h"
#include "libyuv/planar_functions.h"
#include "libyuv/row.h"

#ifdef __cplusplus
namespace libyuv {
extern "C" {
#endif

// ARGB little endian (bgra in memory) to I444
LIBYUV_API
int ARGBToI444(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*ARGBToUV444Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
      int width) = ARGBToUV444Row_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width &&
      dst_stride_u == width &&
      dst_stride_v == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOUV444ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUV444Row = ARGBToUV444Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOUV444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToUV444Row = ARGBToUV444Row_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUV444Row(src_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}
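
// Usage sketch (illustrative, not part of the upstream file): converting a
// tightly packed ARGB buffer to I444 planes. Buffer names and sizes below are
// assumptions; each output plane is width x height bytes.
//
//   uint8* argb = ...;                    // width * height * 4 bytes.
//   uint8* y = ...;                       // width * height bytes.
//   uint8* u = ...;                       // width * height bytes.
//   uint8* v = ...;                       // width * height bytes.
//   int ret = ARGBToI444(argb, width * 4,
//                        y, width,
//                        u, width,
//                        v, width,
//                        width, height);  // 0 on success, -1 on bad args.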

// ARGB little endian (bgra in memory) to I422
LIBYUV_API
int ARGBToI422(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb ||
      !dst_y || !dst_u || !dst_v ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width &&
      dst_stride_u * 2 == width &&
      dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

// ARGB little endian (bgra in memory) to I411
LIBYUV_API
int ARGBToI411(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUV411Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
      int width) = ARGBToUV411Row_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width &&
      dst_stride_u * 4 == width &&
      dst_stride_v * 4 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUV411ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUV411Row = ARGBToUV411Row_Any_NEON;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUV411Row = ARGBToUV411Row_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUV411Row(src_argb, dst_u, dst_v, width);
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

LIBYUV_API
int ARGBToNV12(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_uv, int dst_stride_uv,
               int width, int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
      int width) = MergeUVRow_C;
  if (!src_argb ||
      !dst_y || !dst_uv ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
  {
    // Allocate a temporary row of U and a row of V (half width each).
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}
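
// Usage sketch (illustrative, not part of the upstream file): converting ARGB
// to NV12. The interleaved UV plane is (width + 1) / 2 UV pairs wide, i.e.
// ((width + 1) / 2) * 2 bytes per row, and (height + 1) / 2 rows tall. Buffer
// names below are assumptions.
//
//   int halfwidth = (width + 1) / 2;
//   uint8* argb = ...;                   // width * height * 4 bytes.
//   uint8* y = ...;                      // width * height bytes.
//   uint8* uv = ...;                     // halfwidth * 2 * ((height + 1) / 2) bytes.
//   int ret = ARGBToNV12(argb, width * 4,
//                        y, width,
//                        uv, halfwidth * 2,
//                        width, height); // 0 on success.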

// Same as NV12 but U and V swapped.
LIBYUV_API
int ARGBToNV21(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               uint8* dst_uv, int dst_stride_uv,
               int width, int height) {
  int y;
  int halfwidth = (width + 1) >> 1;
  void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
      int width) = MergeUVRow_C;
  if (!src_argb ||
      !dst_y || !dst_uv ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    MergeUVRow_ = MergeUVRow_Any_SSE2;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_SSE2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    MergeUVRow_ = MergeUVRow_Any_AVX2;
    if (IS_ALIGNED(halfwidth, 32)) {
      MergeUVRow_ = MergeUVRow_AVX2;
    }
  }
#endif
#if defined(HAS_MERGEUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    MergeUVRow_ = MergeUVRow_Any_NEON;
    if (IS_ALIGNED(halfwidth, 16)) {
      MergeUVRow_ = MergeUVRow_NEON;
    }
  }
#endif
  {
    // Allocate a temporary row of U and a row of V (half width each).
    align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
    uint8* row_v = row_u + ((halfwidth + 31) & ~31);
    for (y = 0; y < height - 1; y += 2) {
      ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
      ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
      src_argb += src_stride_argb * 2;
      dst_y += dst_stride_y * 2;
      dst_uv += dst_stride_uv;
    }
    if (height & 1) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
      ARGBToYRow(src_argb, dst_y, width);
    }
    free_aligned_buffer_64(row_u);
  }
  return 0;
}

// Convert ARGB to YUY2.
LIBYUV_API
int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yuy2, int dst_stride_yuy2,
               int width, int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
      const uint8* src_v, uint8* dst_yuy2, int width) = I422ToYUY2Row_C;
  if (!src_argb || !dst_yuy2 ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
    dst_stride_yuy2 = -dst_stride_yuy2;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_yuy2 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yuy2 = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOYUY2ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToYUY2Row = I422ToYUY2Row_NEON;
    }
  }
#endif
  {
    // Allocate a temporary row of Y plus half-width rows of U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8* row_u = row_y + ((width + 63) & ~63);
    uint8* row_v = row_u + ((width + 63) & ~63) / 2;
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
      src_argb += src_stride_argb;
      dst_yuy2 += dst_stride_yuy2;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}
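
// Usage sketch (illustrative, not part of the upstream file): converting ARGB
// to packed YUY2, which stores 4:2:2 data in a single plane at 2 bytes per
// pixel. Buffer names below are assumptions.
//
//   uint8* argb = ...;                   // width * height * 4 bytes.
//   uint8* yuy2 = ...;                   // width * height * 2 bytes.
//   int ret = ARGBToYUY2(argb, width * 4,
//                        yuy2, width * 2,
//                        width, height); // 0 on success.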

// Convert ARGB to UYVY.
LIBYUV_API
int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
               uint8* dst_uyvy, int dst_stride_uyvy,
               int width, int height) {
  int y;
  void (*ARGBToUVRow)(const uint8* src_argb, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
      const uint8* src_v, uint8* dst_uyvy, int width) = I422ToUYVYRow_C;
  if (!src_argb || !dst_uyvy ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
    dst_stride_uyvy = -dst_stride_uyvy;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_uyvy == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_uyvy = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_SSSE3;
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToUVRow = ARGBToUVRow_Any_AVX2;
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToUVRow = ARGBToUVRow_AVX2;
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVRow = ARGBToUVRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVRow = ARGBToUVRow_NEON;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_SSE2;
    }
  }
#endif
#if defined(HAS_I422TOUYVYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      I422ToUYVYRow = I422ToUYVYRow_NEON;
    }
  }
#endif
  {
    // Allocate a temporary row of Y plus half-width rows of U and V.
    align_buffer_64(row_y, ((width + 63) & ~63) * 2);
    uint8* row_u = row_y + ((width + 63) & ~63);
    uint8* row_v = row_u + ((width + 63) & ~63) / 2;
    for (y = 0; y < height; ++y) {
      ARGBToUVRow(src_argb, 0, row_u, row_v, width);
      ARGBToYRow(src_argb, row_y, width);
      I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
      src_argb += src_stride_argb;
      dst_uyvy += dst_stride_uyvy;
    }
    free_aligned_buffer_64(row_y);
  }
  return 0;
}

// Convert ARGB to I400.
LIBYUV_API
int ARGBToI400(const uint8* src_argb, int src_stride_argb,
               uint8* dst_y, int dst_stride_y,
               int width, int height) {
  int y;
  void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int width) =
      ARGBToYRow_C;
  if (!src_argb || !dst_y || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_y == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_y = 0;
  }
#if defined(HAS_ARGBTOYROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYRow = ARGBToYRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYRow = ARGBToYRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYRow = ARGBToYRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYRow = ARGBToYRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYRow = ARGBToYRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYRow = ARGBToYRow_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToYRow(src_argb, dst_y, width);
    src_argb += src_stride_argb;
    dst_y += dst_stride_y;
  }
  return 0;
}

// Shuffle table for converting ARGB to RGBA.
static uvec8 kShuffleMaskARGBToRGBA = {
  3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u
};

// Convert ARGB to RGBA.
LIBYUV_API
int ARGBToRGBA(const uint8* src_argb, int src_stride_argb,
               uint8* dst_rgba, int dst_stride_rgba,
               int width, int height) {
  return ARGBShuffle(src_argb, src_stride_argb,
                     dst_rgba, dst_stride_rgba,
                     (const uint8*)(&kShuffleMaskARGBToRGBA),
                     width, height);
}

// Convert ARGB To RGB24.
LIBYUV_API
int ARGBToRGB24(const uint8* src_argb, int src_stride_argb,
                uint8* dst_rgb24, int dst_stride_rgb24,
                int width, int height) {
  int y;
  void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRGB24Row_C;
  if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_rgb24 == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb24 = 0;
  }
#if defined(HAS_ARGBTORGB24ROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORGB24ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB24Row = ARGBToRGB24Row_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB24Row(src_argb, dst_rgb24, width);
    src_argb += src_stride_argb;
    dst_rgb24 += dst_stride_rgb24;
  }
  return 0;
}

// Convert ARGB To RAW.
LIBYUV_API
int ARGBToRAW(const uint8* src_argb, int src_stride_argb,
              uint8* dst_raw, int dst_stride_raw,
              int width, int height) {
  int y;
  void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRAWRow_C;
  if (!src_argb || !dst_raw || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_raw == width * 3) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_raw = 0;
  }
#if defined(HAS_ARGBTORAWROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToRAWRow = ARGBToRAWRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTORAWROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRAWRow = ARGBToRAWRow_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRAWRow(src_argb, dst_raw, width);
    src_argb += src_stride_argb;
    dst_raw += dst_stride_raw;
  }
  return 0;
}

// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
static const uint8 kDither565_4x4[16] = {
  0, 4, 1, 5,
  6, 2, 7, 3,
  1, 5, 0, 4,
  7, 3, 6, 2,
};

// Convert ARGB To RGB565 with 4x4 dither matrix (16 bytes).
LIBYUV_API
int ARGBToRGB565Dither(const uint8* src_argb, int src_stride_argb,
                       uint8* dst_rgb565, int dst_stride_rgb565,
                       const uint8* dither4x4, int width, int height) {
  int y;
  void (*ARGBToRGB565DitherRow)(const uint8* src_argb, uint8* dst_rgb,
      const uint32 dither4, int width) = ARGBToRGB565DitherRow_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  if (!dither4x4) {
    dither4x4 = kDither565_4x4;
  }
#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB565DitherRow(src_argb, dst_rgb565,
                          *(uint32*)(dither4x4 + ((y & 3) << 2)), width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}
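
// Usage sketch (illustrative, not part of the upstream file): dithered
// conversion of ARGB to RGB565. Passing NULL for dither4x4 selects the
// built-in kDither565_4x4 table; row (y & 3) of the table is applied to image
// row y. Buffer names below are assumptions.
//
//   uint8* argb = ...;                              // width * height * 4 bytes.
//   uint8* rgb565 = ...;                            // width * height * 2 bytes.
//   int ret = ARGBToRGB565Dither(argb, width * 4,
//                                rgb565, width * 2,
//                                NULL,               // use default dither table.
//                                width, height);     // 0 on success.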

// Convert ARGB To RGB565.
// TODO(fbarchard): Consider using dither function low level with zeros.
LIBYUV_API
int ARGBToRGB565(const uint8* src_argb, int src_stride_argb,
                 uint8* dst_rgb565, int dst_stride_rgb565,
                 int width, int height) {
  int y;
  void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToRGB565Row_C;
  if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_rgb565 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_rgb565 = 0;
  }
#if defined(HAS_ARGBTORGB565ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTORGB565ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToRGB565Row = ARGBToRGB565Row_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToRGB565Row(src_argb, dst_rgb565, width);
    src_argb += src_stride_argb;
    dst_rgb565 += dst_stride_rgb565;
  }
  return 0;
}

// Convert ARGB To ARGB1555.
LIBYUV_API
int ARGBToARGB1555(const uint8* src_argb, int src_stride_argb,
                   uint8* dst_argb1555, int dst_stride_argb1555,
                   int width, int height) {
  int y;
  void (*ARGBToARGB1555Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToARGB1555Row_C;
  if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_argb1555 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb1555 = 0;
  }
#if defined(HAS_ARGBTOARGB1555ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB1555ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB1555Row(src_argb, dst_argb1555, width);
    src_argb += src_stride_argb;
    dst_argb1555 += dst_stride_argb1555;
  }
  return 0;
}

// Convert ARGB To ARGB4444.
LIBYUV_API
int ARGBToARGB4444(const uint8* src_argb, int src_stride_argb,
                   uint8* dst_argb4444, int dst_stride_argb4444,
                   int width, int height) {
  int y;
  void (*ARGBToARGB4444Row)(const uint8* src_argb, uint8* dst_rgb, int width) =
      ARGBToARGB4444Row_C;
  if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_argb4444 == width * 2) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_argb4444 = 0;
  }
#if defined(HAS_ARGBTOARGB4444ROW_SSE2)
  if (TestCpuFlag(kCpuHasSSE2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
    if (IS_ALIGNED(width, 4)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOARGB4444ROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToARGB4444Row(src_argb, dst_argb4444, width);
    src_argb += src_stride_argb;
    dst_argb4444 += dst_stride_argb4444;
  }
  return 0;
}

// Convert ARGB to J420. (JPEG full range I420).
LIBYUV_API
int ARGBToJ420(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yj, int dst_stride_yj,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb ||
      !dst_yj || !dst_u || !dst_v ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
  for (y = 0; y < height - 1; y += 2) {
    ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
    src_argb += src_stride_argb * 2;
    dst_yj += dst_stride_yj * 2;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  if (height & 1) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
  }
  return 0;
}

// Convert ARGB to J422. (JPEG full range I422).
LIBYUV_API
int ARGBToJ422(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yj, int dst_stride_yj,
               uint8* dst_u, int dst_stride_u,
               uint8* dst_v, int dst_stride_v,
               int width, int height) {
  int y;
  void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
      uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb ||
      !dst_yj || !dst_u || !dst_v ||
      width <= 0 || height == 0) {
    return -1;
  }
  // Negative height means invert the image.
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_yj == width &&
      dst_stride_u * 2 == width &&
      dst_stride_v * 2 == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = dst_stride_u = dst_stride_v = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_SSSE3;
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
#if defined(HAS_ARGBTOUVJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
    if (IS_ALIGNED(width, 16)) {
      ARGBToUVJRow = ARGBToUVJRow_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
    dst_u += dst_stride_u;
    dst_v += dst_stride_v;
  }
  return 0;
}

// Convert ARGB to J400.
LIBYUV_API
int ARGBToJ400(const uint8* src_argb, int src_stride_argb,
               uint8* dst_yj, int dst_stride_yj,
               int width, int height) {
  int y;
  void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int width) =
      ARGBToYJRow_C;
  if (!src_argb || !dst_yj || width <= 0 || height == 0) {
    return -1;
  }
  if (height < 0) {
    height = -height;
    src_argb = src_argb + (height - 1) * src_stride_argb;
    src_stride_argb = -src_stride_argb;
  }
  // Coalesce rows.
  if (src_stride_argb == width * 4 &&
      dst_stride_yj == width) {
    width *= height;
    height = 1;
    src_stride_argb = dst_stride_yj = 0;
  }
#if defined(HAS_ARGBTOYJROW_SSSE3)
  if (TestCpuFlag(kCpuHasSSSE3)) {
    ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
    if (IS_ALIGNED(width, 16)) {
      ARGBToYJRow = ARGBToYJRow_SSSE3;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_AVX2)
  if (TestCpuFlag(kCpuHasAVX2)) {
    ARGBToYJRow = ARGBToYJRow_Any_AVX2;
    if (IS_ALIGNED(width, 32)) {
      ARGBToYJRow = ARGBToYJRow_AVX2;
    }
  }
#endif
#if defined(HAS_ARGBTOYJROW_NEON)
  if (TestCpuFlag(kCpuHasNEON)) {
    ARGBToYJRow = ARGBToYJRow_Any_NEON;
    if (IS_ALIGNED(width, 8)) {
      ARGBToYJRow = ARGBToYJRow_NEON;
    }
  }
#endif
  for (y = 0; y < height; ++y) {
    ARGBToYJRow(src_argb, dst_yj, width);
    src_argb += src_stride_argb;
    dst_yj += dst_stride_yj;
  }
  return 0;
}

#ifdef __cplusplus
}  // extern "C"
}  // namespace libyuv
#endif