/*
 * ARMv8 NEON optimizations for libjpeg-turbo
 *
 * Copyright (C) 2009-2011, Nokia Corporation and/or its subsidiary(-ies).
 * All Rights Reserved.
 * Author: Siarhei Siamashka <siarhei.siamashka@nokia.com>
 * Copyright (C) 2013-2014, Linaro Limited. All Rights Reserved.
 * Author: Ragesh Radhakrishnan <ragesh.r@linaro.org>
 * Copyright (C) 2014-2016, D. R. Commander. All Rights Reserved.
 * Copyright (C) 2015-2016, Matthieu Darbois. All Rights Reserved.
 * Copyright (C) 2016, Siarhei Siamashka. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

#if defined(__linux__) && defined(__ELF__)
.section .note.GNU-stack, "", %progbits /* mark stack as non-executable */
#endif

.text

#define RESPECT_STRICT_ALIGNMENT 1

/*****************************************************************************/

/* Supplementary macro for setting function attributes */
.macro asm_function fname
#ifdef __APPLE__
.globl _\fname
_\fname:
#else
.global \fname
#ifdef __ELF__
.hidden \fname
.type \fname, %function
#endif
\fname:
#endif
.endm
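/* For example, 'asm_function jsimd_idct_islow_neon' expands to the properly
 * decorated entry label for the target: '_jsimd_idct_islow_neon' with .globl
 * on Apple platforms, or 'jsimd_idct_islow_neon' marked .global, .hidden and
 * %function on ELF targets. */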
/* Transpose elements of a single 128-bit register */
.macro transpose_single x0, x1, xi, xilen, literal
ins \xi\xilen[0], \x0\xilen[0]
ins \x1\xilen[0], \x0\xilen[1]
trn1 \x0\literal, \x0\literal, \x1\literal
trn2 \x1\literal, \xi\literal, \x1\literal
.endm

/* Transpose elements of 2 different registers */
.macro transpose x0, x1, xi, xilen, literal
mov \xi\xilen, \x0\xilen
trn1 \x0\literal, \x0\literal, \x1\literal
trn2 \x1\literal, \xi\literal, \x1\literal
.endm

/* Transpose a block of 4x4 coefficients in four 64-bit registers */
.macro transpose_4x4_32 x0, x0len, x1, x1len, x2, x2len, x3, x3len, xi, xilen
mov \xi\xilen, \x0\xilen
trn1 \x0\x0len, \x0\x0len, \x2\x2len
trn2 \x2\x2len, \xi\x0len, \x2\x2len
mov \xi\xilen, \x1\xilen
trn1 \x1\x1len, \x1\x1len, \x3\x3len
trn2 \x3\x3len, \xi\x1len, \x3\x3len
.endm

.macro transpose_4x4_16 x0, x0len, x1, x1len, x2, x2len, x3, x3len, xi, xilen
mov \xi\xilen, \x0\xilen
trn1 \x0\x0len, \x0\x0len, \x1\x1len
trn2 \x1\x2len, \xi\x0len, \x1\x2len
mov \xi\xilen, \x2\xilen
trn1 \x2\x2len, \x2\x2len, \x3\x3len
trn2 \x3\x2len, \xi\x1len, \x3\x3len
.endm

.macro transpose_4x4 x0, x1, x2, x3, x5
transpose_4x4_16 \x0, .4h, \x1, .4h, \x2, .4h, \x3, .4h, \x5, .16b
transpose_4x4_32 \x0, .2s, \x1, .2s, \x2, .2s, \x3, .2s, \x5, .16b
.endm
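/* Transpose an 8x8 block of 16-bit elements held in l0-l7, using t0-t3 as
 * scratch registers: three rounds of trn1/trn2 interleaves, at 16-, 32- and
 * 64-bit element granularity. */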
.macro transpose_8x8 l0, l1, l2, l3, l4, l5, l6, l7, t0, t1, t2, t3
trn1 \t0\().8h, \l0\().8h, \l1\().8h
trn1 \t1\().8h, \l2\().8h, \l3\().8h
trn1 \t2\().8h, \l4\().8h, \l5\().8h
trn1 \t3\().8h, \l6\().8h, \l7\().8h
trn2 \l1\().8h, \l0\().8h, \l1\().8h
trn2 \l3\().8h, \l2\().8h, \l3\().8h
trn2 \l5\().8h, \l4\().8h, \l5\().8h
trn2 \l7\().8h, \l6\().8h, \l7\().8h
trn1 \l4\().4s, \t2\().4s, \t3\().4s
trn2 \t3\().4s, \t2\().4s, \t3\().4s
trn1 \t2\().4s, \t0\().4s, \t1\().4s
trn2 \l2\().4s, \t0\().4s, \t1\().4s
trn1 \t0\().4s, \l1\().4s, \l3\().4s
trn2 \l3\().4s, \l1\().4s, \l3\().4s
trn2 \t1\().4s, \l5\().4s, \l7\().4s
trn1 \l5\().4s, \l5\().4s, \l7\().4s
trn2 \l6\().2d, \l2\().2d, \t3\().2d
trn1 \l0\().2d, \t2\().2d, \l4\().2d
trn1 \l1\().2d, \t0\().2d, \l5\().2d
trn2 \l7\().2d, \l3\().2d, \t1\().2d
trn1 \l2\().2d, \l2\().2d, \t3\().2d
trn2 \l4\().2d, \t2\().2d, \l4\().2d
trn1 \l3\().2d, \l3\().2d, \t1\().2d
trn2 \l5\().2d, \t0\().2d, \l5\().2d
.endm

#define CENTERJSAMPLE 128

/*****************************************************************************/

/*
 * Perform dequantization and inverse DCT on one block of coefficients.
 *
 * GLOBAL(void)
 * jsimd_idct_islow_neon (void *dct_table, JCOEFPTR coef_block,
 *                        JSAMPARRAY output_buf, JDIMENSION output_col)
 */

#define CONST_BITS 13
#define PASS1_BITS 2

#define F_0_298 2446 /* FIX(0.298631336) */
#define F_0_390 3196 /* FIX(0.390180644) */
#define F_0_541 4433 /* FIX(0.541196100) */
#define F_0_765 6270 /* FIX(0.765366865) */
#define F_0_899 7373 /* FIX(0.899976223) */
#define F_1_175 9633 /* FIX(1.175875602) */
#define F_1_501 12299 /* FIX(1.501321110) */
#define F_1_847 15137 /* FIX(1.847759065) */
#define F_1_961 16069 /* FIX(1.961570560) */
#define F_2_053 16819 /* FIX(2.053119869) */
#define F_2_562 20995 /* FIX(2.562915447) */
#define F_3_072 25172 /* FIX(3.072711026) */
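/* FIX(x) denotes round(x * 2^CONST_BITS), i.e. 13-bit fixed point: for
 * example, FIX(0.541196100) = round(0.541196100 * 8192) = 4433 and
 * FIX(2.053119869) = round(2.053119869 * 8192) = 16819. */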
.balign 16
Ljsimd_idct_islow_neon_consts:
.short F_0_298
.short -F_0_390
.short F_0_541
.short F_0_765
.short -F_0_899
.short F_1_175
.short F_1_501
.short -F_1_847
.short -F_1_961
.short F_2_053
.short -F_2_562
.short F_3_072
.short 0 /* padding */
.short 0
.short 0
.short 0

#undef F_0_298
#undef F_0_390
#undef F_0_541
#undef F_0_765
#undef F_0_899
#undef F_1_175
#undef F_1_501
#undef F_1_847
#undef F_1_961
#undef F_2_053
#undef F_2_562
#undef F_3_072

#define XFIX_P_0_298 v0.h[0]
#define XFIX_N_0_390 v0.h[1]
#define XFIX_P_0_541 v0.h[2]
#define XFIX_P_0_765 v0.h[3]
#define XFIX_N_0_899 v0.h[4]
#define XFIX_P_1_175 v0.h[5]
#define XFIX_P_1_501 v0.h[6]
#define XFIX_N_1_847 v0.h[7]
#define XFIX_N_1_961 v1.h[0]
#define XFIX_P_2_053 v1.h[1]
#define XFIX_N_2_562 v1.h[2]
#define XFIX_P_3_072 v1.h[3]
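/* Each XFIX_* name selects one 16-bit lane of v0/v1 (loaded below from the
 * constant table above), so smull/smlal can multiply a whole vector by that
 * constant using the by-element form. */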
asm_function jsimd_idct_islow_neon
DCT_TABLE .req x0
COEF_BLOCK .req x1
OUTPUT_BUF .req x2
OUTPUT_COL .req x3
TMP1 .req x0
TMP2 .req x1
TMP3 .req x9
TMP4 .req x10
TMP5 .req x11
TMP6 .req x12
TMP7 .req x13
TMP8 .req x14

/* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
   guarantee that the upper (unused) 32 bits of x3 are valid. This
   instruction ensures that those bits are set to zero. */
uxtw x3, w3

sub sp, sp, #64
adr x15, Ljsimd_idct_islow_neon_consts
mov x10, sp
st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], #32
st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], #32
ld1 {v0.8h, v1.8h}, [x15]
ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [COEF_BLOCK], #64
ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [DCT_TABLE], #64
ld1 {v6.8h, v7.8h, v8.8h, v9.8h}, [COEF_BLOCK], #64
ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [DCT_TABLE], #64
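/* Detect columns in which all AC coefficients (rows 1-7) are zero: compare
 * each of rows 1-7 against zero, AND the per-column results together, then
 * narrow to one byte per column so the whole mask fits in one 64-bit GP
 * register for a single branch test. */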
cmeq v16.8h, v3.8h, #0
cmeq v26.8h, v4.8h, #0
cmeq v27.8h, v5.8h, #0
cmeq v28.8h, v6.8h, #0
cmeq v29.8h, v7.8h, #0
cmeq v30.8h, v8.8h, #0
cmeq v31.8h, v9.8h, #0
and v10.16b, v16.16b, v26.16b
and v11.16b, v27.16b, v28.16b
and v12.16b, v29.16b, v30.16b
and v13.16b, v31.16b, v10.16b
and v14.16b, v11.16b, v12.16b
mul v2.8h, v2.8h, v18.8h
and v15.16b, v13.16b, v14.16b
shl v10.8h, v2.8h, #(PASS1_BITS)
sqxtn v16.8b, v15.8h
mov TMP1, v16.d[0]
mvn TMP2, TMP1
cbnz TMP2, 2f

/* case all AC coeffs are zeros */
dup v2.2d, v10.d[0]
dup v6.2d, v10.d[1]
mov v3.16b, v2.16b
mov v7.16b, v6.16b
mov v4.16b, v2.16b
mov v8.16b, v6.16b
mov v5.16b, v2.16b
mov v9.16b, v6.16b

1:
/* for this transpose, we should organise data like this:
 * 00, 01, 02, 03, 40, 41, 42, 43
 * 10, 11, 12, 13, 50, 51, 52, 53
 * 20, 21, 22, 23, 60, 61, 62, 63
 * 30, 31, 32, 33, 70, 71, 72, 73
 * 04, 05, 06, 07, 44, 45, 46, 47
 * 14, 15, 16, 17, 54, 55, 56, 57
 * 24, 25, 26, 27, 64, 65, 66, 67
 * 34, 35, 36, 37, 74, 75, 76, 77
 */
trn1 v28.8h, v2.8h, v3.8h
trn1 v29.8h, v4.8h, v5.8h
trn1 v30.8h, v6.8h, v7.8h
trn1 v31.8h, v8.8h, v9.8h
trn2 v16.8h, v2.8h, v3.8h
trn2 v17.8h, v4.8h, v5.8h
trn2 v18.8h, v6.8h, v7.8h
trn2 v19.8h, v8.8h, v9.8h
trn1 v2.4s, v28.4s, v29.4s
trn1 v6.4s, v30.4s, v31.4s
trn1 v3.4s, v16.4s, v17.4s
trn1 v7.4s, v18.4s, v19.4s
trn2 v4.4s, v28.4s, v29.4s
trn2 v8.4s, v30.4s, v31.4s
trn2 v5.4s, v16.4s, v17.4s
trn2 v9.4s, v18.4s, v19.4s

/* Even part: reverse the even part of the forward DCT. */
add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v21.16b, v19.16b /* tmp3 = z1 */
mov v20.16b, v18.16b /* tmp3 = z1 */
smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); */
smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); */
sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */

/* Odd part per figure 8; the matrix is unitary and hence its
 * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
 */
add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560) */
smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644) */
smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, - FIX_0_899976223) */
smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, - FIX_2_562915447) */
smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560) */
smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644) */
smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, - FIX_0_899976223) */
smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, - FIX_2_562915447) */
add v23.4s, v23.4s, v27.4s /* z3 += z5 */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v25.4s, v25.4s, v27.4s /* z4 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */

/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
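/* Descale: the total right shift of CONST_BITS+PASS1_BITS+3 = 18 bits is
 * split into a plain narrowing shift by 16 here and a rounding, saturating
 * shift by the remaining 2 bits in the sqrshrn instructions below. */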
shrn v2.4h, v18.4s, #16 /* wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
shrn v9.4h, v20.4s, #16 /* wsptr[DCTSIZE*7] = (int) DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
shrn v3.4h, v22.4s, #16 /* wsptr[DCTSIZE*1] = (int) DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
shrn v8.4h, v24.4s, #16 /* wsptr[DCTSIZE*6] = (int) DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
shrn v4.4h, v26.4s, #16 /* wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
shrn v7.4h, v28.4s, #16 /* wsptr[DCTSIZE*5] = (int) DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
shrn v5.4h, v14.4s, #16 /* wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
shrn v6.4h, v16.4s, #16 /* wsptr[DCTSIZE*4] = (int) DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
shrn2 v2.8h, v19.4s, #16 /* wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp3, CONST_BITS+PASS1_BITS+3) */
shrn2 v9.8h, v21.4s, #16 /* wsptr[DCTSIZE*7] = (int) DESCALE(tmp10 - tmp3, CONST_BITS+PASS1_BITS+3) */
shrn2 v3.8h, v23.4s, #16 /* wsptr[DCTSIZE*1] = (int) DESCALE(tmp11 + tmp2, CONST_BITS+PASS1_BITS+3) */
shrn2 v8.8h, v25.4s, #16 /* wsptr[DCTSIZE*6] = (int) DESCALE(tmp11 - tmp2, CONST_BITS+PASS1_BITS+3) */
shrn2 v4.8h, v27.4s, #16 /* wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 + tmp1, CONST_BITS+PASS1_BITS+3) */
shrn2 v7.8h, v29.4s, #16 /* wsptr[DCTSIZE*5] = (int) DESCALE(tmp12 - tmp1, CONST_BITS+PASS1_BITS+3) */
shrn2 v5.8h, v15.4s, #16 /* wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS+PASS1_BITS+3) */
shrn2 v6.8h, v17.4s, #16 /* wsptr[DCTSIZE*4] = (int) DESCALE(tmp13 - tmp0, CONST_BITS+PASS1_BITS+3) */
movi v0.16b, #(CENTERJSAMPLE)

/* Prepare pointers (dual-issue with NEON instructions) */
ldp TMP1, TMP2, [OUTPUT_BUF], 16
sqrshrn v28.8b, v2.8h, #(CONST_BITS+PASS1_BITS+3-16)
ldp TMP3, TMP4, [OUTPUT_BUF], 16
sqrshrn v29.8b, v3.8h, #(CONST_BITS+PASS1_BITS+3-16)
add TMP1, TMP1, OUTPUT_COL
sqrshrn v30.8b, v4.8h, #(CONST_BITS+PASS1_BITS+3-16)
add TMP2, TMP2, OUTPUT_COL
sqrshrn v31.8b, v5.8h, #(CONST_BITS+PASS1_BITS+3-16)
add TMP3, TMP3, OUTPUT_COL
sqrshrn2 v28.16b, v6.8h, #(CONST_BITS+PASS1_BITS+3-16)
add TMP4, TMP4, OUTPUT_COL
sqrshrn2 v29.16b, v7.8h, #(CONST_BITS+PASS1_BITS+3-16)
ldp TMP5, TMP6, [OUTPUT_BUF], 16
sqrshrn2 v30.16b, v8.8h, #(CONST_BITS+PASS1_BITS+3-16)
ldp TMP7, TMP8, [OUTPUT_BUF], 16
sqrshrn2 v31.16b, v9.8h, #(CONST_BITS+PASS1_BITS+3-16)
add TMP5, TMP5, OUTPUT_COL
add v16.16b, v28.16b, v0.16b
add TMP6, TMP6, OUTPUT_COL
add v18.16b, v29.16b, v0.16b
add TMP7, TMP7, OUTPUT_COL
add v20.16b, v30.16b, v0.16b
add TMP8, TMP8, OUTPUT_COL
add v22.16b, v31.16b, v0.16b

/* Transpose the final 8-bit samples */
trn1 v28.16b, v16.16b, v18.16b
trn1 v30.16b, v20.16b, v22.16b
trn2 v29.16b, v16.16b, v18.16b
trn2 v31.16b, v20.16b, v22.16b
trn1 v16.8h, v28.8h, v30.8h
trn2 v18.8h, v28.8h, v30.8h
trn1 v20.8h, v29.8h, v31.8h
trn2 v22.8h, v29.8h, v31.8h
uzp1 v28.4s, v16.4s, v18.4s
uzp2 v30.4s, v16.4s, v18.4s
uzp1 v29.4s, v20.4s, v22.4s
uzp2 v31.4s, v20.4s, v22.4s

/* Store results to the output buffer */
st1 {v28.d}[0], [TMP1]
st1 {v29.d}[0], [TMP2]
st1 {v28.d}[1], [TMP3]
st1 {v29.d}[1], [TMP4]
st1 {v30.d}[0], [TMP5]
st1 {v31.d}[0], [TMP6]
st1 {v30.d}[1], [TMP7]
st1 {v31.d}[1], [TMP8]
ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], #32
ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], #32
blr x30

.balign 16
2:
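/* At least one AC coefficient is nonzero. TMP2 is the inverted column mask:
 * a set bit means that column has nonzero AC coefficients. Its high 32 bits
 * cover columns 4-7 (right half) and are tested here; the low 32 bits,
 * parked in TMP4 via LSL #32, cover columns 0-3 (left half) and are tested
 * at 3: below. */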
mul v3.8h, v3.8h, v19.8h
mul v4.8h, v4.8h, v20.8h
mul v5.8h, v5.8h, v21.8h
add TMP4, xzr, TMP2, LSL #32
mul v6.8h, v6.8h, v22.8h
mul v7.8h, v7.8h, v23.8h
adds TMP3, xzr, TMP2, LSR #32
mul v8.8h, v8.8h, v24.8h
mul v9.8h, v9.8h, v25.8h
b.ne 3f

/* Right AC coef is zero */
dup v15.2d, v10.d[1]

/* Even part: reverse the even part of the forward DCT. */
add v18.4h, v4.4h, v8.4h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.4h, v2.4h, v6.4h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
sub v26.4h, v2.4h, v6.4h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v20.16b, v18.16b /* tmp3 = z1 */
sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); */
smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */

/* Odd part per figure 8; the matrix is unitary and hence its
 * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
 */
add v22.4h, v9.4h, v5.4h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.4h, v7.4h, v3.4h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.4h, v9.4h, v3.4h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.4h, v7.4h, v5.4h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.4h, v22.4h, v24.4h /* z5 = z3 + z4 */
smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560) */
smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644) */
smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, - FIX_0_899976223) */
smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, - FIX_2_562915447) */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */

/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
rshrn v2.4h, v18.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v3.4h, v22.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*1] = (int) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v4.4h, v26.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v5.4h, v14.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v2.8h, v16.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*4] = (int) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v3.8h, v28.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*5] = (int) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v4.8h, v24.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*6] = (int) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v5.8h, v20.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*7] = (int) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
mov v6.16b, v15.16b
mov v7.16b, v15.16b
mov v8.16b, v15.16b
mov v9.16b, v15.16b
b 1b
.balign 16
3:
cbnz TMP4, 4f

/* Left AC coef is zero */
dup v14.2d, v10.d[0]

/* Even part: reverse the even part of the forward DCT. */
add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v21.16b, v19.16b /* tmp3 = z1 */
smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); */
sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */

/* Odd part per figure 8; the matrix is unitary and hence its
 * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
 */
add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560) */
smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644) */
smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, - FIX_0_899976223) */
smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, - FIX_2_562915447) */
add v23.4s, v23.4s, v27.4s /* z3 += z5 */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v25.4s, v25.4s, v27.4s /* z4 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */

/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
mov v2.16b, v14.16b
mov v3.16b, v14.16b
mov v4.16b, v14.16b
mov v5.16b, v14.16b
rshrn v6.4h, v19.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v7.4h, v23.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*1] = (int) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v8.4h, v27.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v9.4h, v15.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v6.8h, v17.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*4] = (int) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v7.8h, v29.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*5] = (int) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v8.8h, v25.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*6] = (int) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v9.8h, v21.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*7] = (int) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
b 1b
.balign 16
4:
/* "No" AC coef is zero */
/* Even part: reverse the even part of the forward DCT. */
add v18.8h, v4.8h, v8.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]) + DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]) */
add v22.8h, v2.8h, v6.8h /* z2 + z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) + DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull2 v19.4s, v18.8h, XFIX_P_0_541 /* z1h z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sub v26.8h, v2.8h, v6.8h /* z2 - z3 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) - DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]) */
smull v18.4s, v18.4h, XFIX_P_0_541 /* z1l z1 = MULTIPLY(z2 + z3, FIX_0_541196100); */
sshll2 v23.4s, v22.8h, #(CONST_BITS) /* tmp0h tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
mov v21.16b, v19.16b /* tmp3 = z1 */
mov v20.16b, v18.16b /* tmp3 = z1 */
smlal2 v19.4s, v8.8h, XFIX_N_1_847 /* tmp2h tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); */
smlal v18.4s, v8.4h, XFIX_N_1_847 /* tmp2l tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065); */
sshll2 v27.4s, v26.8h, #(CONST_BITS) /* tmp1h tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
smlal2 v21.4s, v4.8h, XFIX_P_0_765 /* tmp3h tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
smlal v20.4s, v4.4h, XFIX_P_0_765 /* tmp3l tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865); */
sshll v22.4s, v22.4h, #(CONST_BITS) /* tmp0l tmp0 = LEFT_SHIFT(z2 + z3, CONST_BITS); */
sshll v26.4s, v26.4h, #(CONST_BITS) /* tmp1l tmp1 = LEFT_SHIFT(z2 - z3, CONST_BITS); */
add v2.4s, v22.4s, v20.4s /* tmp10l tmp10 = tmp0 + tmp3; */
sub v6.4s, v22.4s, v20.4s /* tmp13l tmp13 = tmp0 - tmp3; */
add v8.4s, v26.4s, v18.4s /* tmp11l tmp11 = tmp1 + tmp2; */
sub v4.4s, v26.4s, v18.4s /* tmp12l tmp12 = tmp1 - tmp2; */
add v28.4s, v23.4s, v21.4s /* tmp10h tmp10 = tmp0 + tmp3; */
sub v31.4s, v23.4s, v21.4s /* tmp13h tmp13 = tmp0 - tmp3; */
add v29.4s, v27.4s, v19.4s /* tmp11h tmp11 = tmp1 + tmp2; */
sub v30.4s, v27.4s, v19.4s /* tmp12h tmp12 = tmp1 - tmp2; */

/* Odd part per figure 8; the matrix is unitary and hence its
 * transpose is its inverse. i0..i3 are y7,y5,y3,y1 respectively.
 */
add v22.8h, v9.8h, v5.8h /* z3 = tmp0 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v24.8h, v7.8h, v3.8h /* z4 = tmp1 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v18.8h, v9.8h, v3.8h /* z1 = tmp0 + tmp3 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]) + DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]) */
add v20.8h, v7.8h, v5.8h /* z2 = tmp1 + tmp2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]) + DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]) */
add v26.8h, v22.8h, v24.8h /* z5 = z3 + z4 */
smull2 v11.4s, v9.8h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull2 v13.4s, v7.8h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull2 v15.4s, v5.8h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull2 v17.4s, v3.8h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull2 v27.4s, v26.8h, XFIX_P_1_175 /* z5h z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull2 v23.4s, v22.8h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560) */
smull2 v25.4s, v24.8h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644) */
smull2 v19.4s, v18.8h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, - FIX_0_899976223) */
smull2 v21.4s, v20.8h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, - FIX_2_562915447) */
smull v10.4s, v9.4h, XFIX_P_0_298 /* tmp0 = MULTIPLY(tmp0, FIX_0_298631336) */
smull v12.4s, v7.4h, XFIX_P_2_053 /* tmp1 = MULTIPLY(tmp1, FIX_2_053119869) */
smull v14.4s, v5.4h, XFIX_P_3_072 /* tmp2 = MULTIPLY(tmp2, FIX_3_072711026) */
smull v16.4s, v3.4h, XFIX_P_1_501 /* tmp3 = MULTIPLY(tmp3, FIX_1_501321110) */
smull v26.4s, v26.4h, XFIX_P_1_175 /* z5l z5 = MULTIPLY(z3 + z4, FIX_1_175875602) */
smull v22.4s, v22.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560) */
smull v24.4s, v24.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644) */
smull v18.4s, v18.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, - FIX_0_899976223) */
smull v20.4s, v20.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, - FIX_2_562915447) */
add v23.4s, v23.4s, v27.4s /* z3 += z5 */
add v22.4s, v22.4s, v26.4s /* z3 += z5 */
add v25.4s, v25.4s, v27.4s /* z4 += z5 */
add v24.4s, v24.4s, v26.4s /* z4 += z5 */
add v11.4s, v11.4s, v19.4s /* tmp0 += z1 */
add v10.4s, v10.4s, v18.4s /* tmp0 += z1 */
add v13.4s, v13.4s, v21.4s /* tmp1 += z2 */
add v12.4s, v12.4s, v20.4s /* tmp1 += z2 */
add v15.4s, v15.4s, v21.4s /* tmp2 += z2 */
add v14.4s, v14.4s, v20.4s /* tmp2 += z2 */
add v17.4s, v17.4s, v19.4s /* tmp3 += z1 */
add v16.4s, v16.4s, v18.4s /* tmp3 += z1 */
add v11.4s, v11.4s, v23.4s /* tmp0 += z3 */
add v10.4s, v10.4s, v22.4s /* tmp0 += z3 */
add v13.4s, v13.4s, v25.4s /* tmp1 += z4 */
add v12.4s, v12.4s, v24.4s /* tmp1 += z4 */
add v17.4s, v17.4s, v25.4s /* tmp3 += z4 */
add v16.4s, v16.4s, v24.4s /* tmp3 += z4 */
add v15.4s, v15.4s, v23.4s /* tmp2 += z3 */
add v14.4s, v14.4s, v22.4s /* tmp2 += z3 */

/* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
add v18.4s, v2.4s, v16.4s /* tmp10 + tmp3 */
add v19.4s, v28.4s, v17.4s /* tmp10 + tmp3 */
sub v20.4s, v2.4s, v16.4s /* tmp10 - tmp3 */
sub v21.4s, v28.4s, v17.4s /* tmp10 - tmp3 */
add v22.4s, v8.4s, v14.4s /* tmp11 + tmp2 */
add v23.4s, v29.4s, v15.4s /* tmp11 + tmp2 */
sub v24.4s, v8.4s, v14.4s /* tmp11 - tmp2 */
sub v25.4s, v29.4s, v15.4s /* tmp11 - tmp2 */
add v26.4s, v4.4s, v12.4s /* tmp12 + tmp1 */
add v27.4s, v30.4s, v13.4s /* tmp12 + tmp1 */
sub v28.4s, v4.4s, v12.4s /* tmp12 - tmp1 */
sub v29.4s, v30.4s, v13.4s /* tmp12 - tmp1 */
add v14.4s, v6.4s, v10.4s /* tmp13 + tmp0 */
add v15.4s, v31.4s, v11.4s /* tmp13 + tmp0 */
sub v16.4s, v6.4s, v10.4s /* tmp13 - tmp0 */
sub v17.4s, v31.4s, v11.4s /* tmp13 - tmp0 */
rshrn v2.4h, v18.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v3.4h, v22.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*1] = (int) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v4.4h, v26.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v5.4h, v14.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn v6.4h, v19.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp3, CONST_BITS-PASS1_BITS) */
rshrn v7.4h, v23.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*1] = (int) DESCALE(tmp11 + tmp2, CONST_BITS-PASS1_BITS) */
rshrn v8.4h, v27.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 + tmp1, CONST_BITS-PASS1_BITS) */
rshrn v9.4h, v15.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*3] = (int) DESCALE(tmp13 + tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v2.8h, v16.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*4] = (int) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v3.8h, v28.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*5] = (int) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v4.8h, v24.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*6] = (int) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v5.8h, v20.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*7] = (int) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
rshrn2 v6.8h, v17.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*4] = (int) DESCALE(tmp13 - tmp0, CONST_BITS-PASS1_BITS) */
rshrn2 v7.8h, v29.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*5] = (int) DESCALE(tmp12 - tmp1, CONST_BITS-PASS1_BITS) */
rshrn2 v8.8h, v25.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*6] = (int) DESCALE(tmp11 - tmp2, CONST_BITS-PASS1_BITS) */
rshrn2 v9.8h, v21.4s, #(CONST_BITS-PASS1_BITS) /* wsptr[DCTSIZE*7] = (int) DESCALE(tmp10 - tmp3, CONST_BITS-PASS1_BITS) */
b 1b
.unreq DCT_TABLE
.unreq COEF_BLOCK
.unreq OUTPUT_BUF
.unreq OUTPUT_COL
.unreq TMP1
.unreq TMP2
.unreq TMP3
.unreq TMP4
.unreq TMP5
.unreq TMP6
.unreq TMP7
.unreq TMP8

#undef CENTERJSAMPLE
#undef CONST_BITS
#undef PASS1_BITS
#undef XFIX_P_0_298
#undef XFIX_N_0_390
#undef XFIX_P_0_541
#undef XFIX_P_0_765
#undef XFIX_N_0_899
#undef XFIX_P_1_175
#undef XFIX_P_1_501
#undef XFIX_N_1_847
#undef XFIX_N_1_961
#undef XFIX_P_2_053
#undef XFIX_N_2_562
#undef XFIX_P_3_072

/*****************************************************************************/

/*
 * jsimd_idct_ifast_neon
 *
 * This function contains a fast, not so accurate integer implementation of
 * the inverse DCT (Discrete Cosine Transform). It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_idct_ifast'
 * function from jidctfst.c.
 *
 * Normally a 1-D AAN DCT needs 5 multiplications and 29 additions.
 * But in the ARM NEON case some extra additions are required because the
 * VQDMULH instruction can't handle constants larger than 1. So expressions
 * like "x * 1.082392200" have to be converted to "x * 0.082392200 + x",
 * which introduces an extra addition. Overall, there are 6 extra additions
 * per 1-D IDCT pass, totaling 5 VQDMULH and 35 VADD/VSUB instructions.
 */

#define XFIX_1_082392200 v0.h[0]
#define XFIX_1_414213562 v0.h[1]
#define XFIX_1_847759065 v0.h[2]
#define XFIX_2_613125930 v0.h[3]

.balign 16
Ljsimd_idct_ifast_neon_consts:
.short (277 * 128 - 256 * 128) /* XFIX_1_082392200 */
.short (362 * 128 - 256 * 128) /* XFIX_1_414213562 */
.short (473 * 128 - 256 * 128) /* XFIX_1_847759065 */
.short (669 * 128 - 512 * 128) /* XFIX_2_613125930 */
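/* How the constants above encode the multipliers (sketch): SQDMULH by n
 * computes x * n / 32768, so n = (277 - 256) * 128 = 2688 yields x * 21/256,
 * the fractional part of 277/256 ~= 1.082392200; the missing "+ x" is
 * restored by the extra addition mentioned above. Likewise 362/256 ~=
 * 1.414213562, 473/256 ~= 1.847759065 and 669/256 ~= 2.613125930 (with
 * 512 * 128, i.e. 2.0, subtracted and the missing 2*x added back separately). */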
asm_function jsimd_idct_ifast_neon
DCT_TABLE .req x0
COEF_BLOCK .req x1
OUTPUT_BUF .req x2
OUTPUT_COL .req x3
TMP1 .req x0
TMP2 .req x1
TMP3 .req x9
TMP4 .req x10
TMP5 .req x11
TMP6 .req x12
TMP7 .req x13
TMP8 .req x14

/* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
   guarantee that the upper (unused) 32 bits of x3 are valid. This
   instruction ensures that those bits are set to zero. */
uxtw x3, w3

/* Load and dequantize coefficients into NEON registers
 * with the following allocation:
 *        0 1 2 3 | 4 5 6 7
 *       ---------+--------
 *   0   | d16    | d17    ( v16.8h )
 *   1   | d18    | d19    ( v17.8h )
 *   2   | d20    | d21    ( v18.8h )
 *   3   | d22    | d23    ( v19.8h )
 *   4   | d24    | d25    ( v20.8h )
 *   5   | d26    | d27    ( v21.8h )
 *   6   | d28    | d29    ( v22.8h )
 *   7   | d30    | d31    ( v23.8h )
 */
/* Save NEON registers used in fast IDCT */
adr TMP5, Ljsimd_idct_ifast_neon_consts
ld1 {v16.8h, v17.8h}, [COEF_BLOCK], 32
ld1 {v0.8h, v1.8h}, [DCT_TABLE], 32
ld1 {v18.8h, v19.8h}, [COEF_BLOCK], 32
mul v16.8h, v16.8h, v0.8h
ld1 {v2.8h, v3.8h}, [DCT_TABLE], 32
mul v17.8h, v17.8h, v1.8h
ld1 {v20.8h, v21.8h}, [COEF_BLOCK], 32
mul v18.8h, v18.8h, v2.8h
ld1 {v0.8h, v1.8h}, [DCT_TABLE], 32
mul v19.8h, v19.8h, v3.8h
ld1 {v22.8h, v23.8h}, [COEF_BLOCK], 32
mul v20.8h, v20.8h, v0.8h
ld1 {v2.8h, v3.8h}, [DCT_TABLE], 32
mul v22.8h, v22.8h, v2.8h
mul v21.8h, v21.8h, v1.8h
ld1 {v0.4h}, [TMP5] /* load constants */
mul v23.8h, v23.8h, v3.8h

/* 1-D IDCT, pass 1 */
sub v2.8h, v18.8h, v22.8h
add v22.8h, v18.8h, v22.8h
sub v1.8h, v19.8h, v21.8h
add v21.8h, v19.8h, v21.8h
sub v5.8h, v17.8h, v23.8h
add v23.8h, v17.8h, v23.8h
sqdmulh v4.8h, v2.8h, XFIX_1_414213562
sqdmulh v6.8h, v1.8h, XFIX_2_613125930
add v3.8h, v1.8h, v1.8h
sub v1.8h, v5.8h, v1.8h
add v18.8h, v2.8h, v4.8h
sqdmulh v4.8h, v1.8h, XFIX_1_847759065
sub v2.8h, v23.8h, v21.8h
add v3.8h, v3.8h, v6.8h
sqdmulh v6.8h, v2.8h, XFIX_1_414213562
add v1.8h, v1.8h, v4.8h
sqdmulh v4.8h, v5.8h, XFIX_1_082392200
sub v18.8h, v18.8h, v22.8h
add v2.8h, v2.8h, v6.8h
sub v6.8h, v16.8h, v20.8h
add v20.8h, v16.8h, v20.8h
add v17.8h, v5.8h, v4.8h
add v5.8h, v6.8h, v18.8h
sub v18.8h, v6.8h, v18.8h
add v6.8h, v23.8h, v21.8h
add v16.8h, v20.8h, v22.8h
sub v3.8h, v6.8h, v3.8h
sub v20.8h, v20.8h, v22.8h
sub v3.8h, v3.8h, v1.8h
sub v1.8h, v17.8h, v1.8h
add v2.8h, v3.8h, v2.8h
sub v23.8h, v16.8h, v6.8h
add v1.8h, v1.8h, v2.8h
add v16.8h, v16.8h, v6.8h
add v22.8h, v5.8h, v3.8h
sub v17.8h, v5.8h, v3.8h
sub v21.8h, v18.8h, v2.8h
add v18.8h, v18.8h, v2.8h
sub v19.8h, v20.8h, v1.8h
add v20.8h, v20.8h, v1.8h
transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v28, v29, v30, v31

/* 1-D IDCT, pass 2 */
sub v2.8h, v18.8h, v22.8h
add v22.8h, v18.8h, v22.8h
sub v1.8h, v19.8h, v21.8h
add v21.8h, v19.8h, v21.8h
sub v5.8h, v17.8h, v23.8h
add v23.8h, v17.8h, v23.8h
sqdmulh v4.8h, v2.8h, XFIX_1_414213562
sqdmulh v6.8h, v1.8h, XFIX_2_613125930
add v3.8h, v1.8h, v1.8h
sub v1.8h, v5.8h, v1.8h
add v18.8h, v2.8h, v4.8h
sqdmulh v4.8h, v1.8h, XFIX_1_847759065
sub v2.8h, v23.8h, v21.8h
add v3.8h, v3.8h, v6.8h
sqdmulh v6.8h, v2.8h, XFIX_1_414213562
add v1.8h, v1.8h, v4.8h
sqdmulh v4.8h, v5.8h, XFIX_1_082392200
sub v18.8h, v18.8h, v22.8h
add v2.8h, v2.8h, v6.8h
sub v6.8h, v16.8h, v20.8h
add v20.8h, v16.8h, v20.8h
add v17.8h, v5.8h, v4.8h
add v5.8h, v6.8h, v18.8h
sub v18.8h, v6.8h, v18.8h
add v6.8h, v23.8h, v21.8h
add v16.8h, v20.8h, v22.8h
sub v3.8h, v6.8h, v3.8h
sub v20.8h, v20.8h, v22.8h
sub v3.8h, v3.8h, v1.8h
sub v1.8h, v17.8h, v1.8h
add v2.8h, v3.8h, v2.8h
sub v23.8h, v16.8h, v6.8h
add v1.8h, v1.8h, v2.8h
add v16.8h, v16.8h, v6.8h
add v22.8h, v5.8h, v3.8h
sub v17.8h, v5.8h, v3.8h
sub v21.8h, v18.8h, v2.8h
add v18.8h, v18.8h, v2.8h
sub v19.8h, v20.8h, v1.8h
add v20.8h, v20.8h, v1.8h

/* Descale to 8-bit and range limit */
  850. movi v0.16b, #0x80
  851. /* Prepare pointers (dual-issue with NEON instructions) */
  852. ldp TMP1, TMP2, [OUTPUT_BUF], 16
  853. sqshrn v28.8b, v16.8h, #5
  854. ldp TMP3, TMP4, [OUTPUT_BUF], 16
  855. sqshrn v29.8b, v17.8h, #5
  856. add TMP1, TMP1, OUTPUT_COL
  857. sqshrn v30.8b, v18.8h, #5
  858. add TMP2, TMP2, OUTPUT_COL
  859. sqshrn v31.8b, v19.8h, #5
  860. add TMP3, TMP3, OUTPUT_COL
  861. sqshrn2 v28.16b, v20.8h, #5
  862. add TMP4, TMP4, OUTPUT_COL
  863. sqshrn2 v29.16b, v21.8h, #5
  864. ldp TMP5, TMP6, [OUTPUT_BUF], 16
  865. sqshrn2 v30.16b, v22.8h, #5
  866. ldp TMP7, TMP8, [OUTPUT_BUF], 16
  867. sqshrn2 v31.16b, v23.8h, #5
  868. add TMP5, TMP5, OUTPUT_COL
  869. add v16.16b, v28.16b, v0.16b
  870. add TMP6, TMP6, OUTPUT_COL
  871. add v18.16b, v29.16b, v0.16b
  872. add TMP7, TMP7, OUTPUT_COL
  873. add v20.16b, v30.16b, v0.16b
  874. add TMP8, TMP8, OUTPUT_COL
  875. add v22.16b, v31.16b, v0.16b
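/* What the descale/range-limit sequence above computes per sample, roughly
 * (a C sketch, not part of the original source):
 *
 *   #include <stdint.h>
 *
 *   static int8_t sat8(int16_t v)                    // signed saturation to 8 bits
 *   {
 *     return (int8_t)(v < -128 ? -128 : (v > 127 ? 127 : v));
 *   }
 *
 *   static uint8_t descale_range_limit(int16_t x)    // x: pass 2 result
 *   {
 *     return (uint8_t)(sat8(x >> 5) + 0x80);         // sqshrn #5, then add 0x80
 *   }
 */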
  876. /* Transpose the final 8-bit samples */
  877. trn1 v28.16b, v16.16b, v18.16b
  878. trn1 v30.16b, v20.16b, v22.16b
  879. trn2 v29.16b, v16.16b, v18.16b
  880. trn2 v31.16b, v20.16b, v22.16b
  881. trn1 v16.8h, v28.8h, v30.8h
  882. trn2 v18.8h, v28.8h, v30.8h
  883. trn1 v20.8h, v29.8h, v31.8h
  884. trn2 v22.8h, v29.8h, v31.8h
  885. uzp1 v28.4s, v16.4s, v18.4s
  886. uzp2 v30.4s, v16.4s, v18.4s
  887. uzp1 v29.4s, v20.4s, v22.4s
  888. uzp2 v31.4s, v20.4s, v22.4s
  889. /* Store results to the output buffer */
  890. st1 {v28.d}[0], [TMP1]
  891. st1 {v29.d}[0], [TMP2]
  892. st1 {v28.d}[1], [TMP3]
  893. st1 {v29.d}[1], [TMP4]
  894. st1 {v30.d}[0], [TMP5]
  895. st1 {v31.d}[0], [TMP6]
  896. st1 {v30.d}[1], [TMP7]
  897. st1 {v31.d}[1], [TMP8]
  898. blr x30
  899. .unreq DCT_TABLE
  900. .unreq COEF_BLOCK
  901. .unreq OUTPUT_BUF
  902. .unreq OUTPUT_COL
  903. .unreq TMP1
  904. .unreq TMP2
  905. .unreq TMP3
  906. .unreq TMP4
  907. .unreq TMP5
  908. .unreq TMP6
  909. .unreq TMP7
  910. .unreq TMP8
  911. /*****************************************************************************/
/*
 * jsimd_idct_4x4_neon
 *
 * This function contains inverse-DCT code for producing reduced-size
 * 4x4-pixel output from an 8x8 DCT block. It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_idct_4x4'
 * function from jpeg-6b (jidctred.c).
 *
 * NOTE: jpeg-8 has an improved implementation of the 4x4 inverse DCT, which
 * requires far fewer arithmetic operations and hence should be faster.
 * The primary purpose of this particular NEON-optimized function is
 * bit-exact compatibility with jpeg-6b.
 *
 * TODO: slightly better instruction scheduling can be achieved by expanding
 * the idct_helper/transpose_4x4 macros and reordering instructions,
 * but readability will suffer somewhat.
 */
  929. #define CONST_BITS 13
  930. #define FIX_0_211164243 (1730) /* FIX(0.211164243) */
  931. #define FIX_0_509795579 (4176) /* FIX(0.509795579) */
  932. #define FIX_0_601344887 (4926) /* FIX(0.601344887) */
  933. #define FIX_0_720959822 (5906) /* FIX(0.720959822) */
  934. #define FIX_0_765366865 (6270) /* FIX(0.765366865) */
  935. #define FIX_0_850430095 (6967) /* FIX(0.850430095) */
  936. #define FIX_0_899976223 (7373) /* FIX(0.899976223) */
  937. #define FIX_1_061594337 (8697) /* FIX(1.061594337) */
  938. #define FIX_1_272758580 (10426) /* FIX(1.272758580) */
  939. #define FIX_1_451774981 (11893) /* FIX(1.451774981) */
  940. #define FIX_1_847759065 (15137) /* FIX(1.847759065) */
  941. #define FIX_2_172734803 (17799) /* FIX(2.172734803) */
  942. #define FIX_2_562915447 (20995) /* FIX(2.562915447) */
  943. #define FIX_3_624509785 (29692) /* FIX(3.624509785) */
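/* These values follow the usual libjpeg FIX() convention at CONST_BITS = 13
 * (a hedged note, assuming the standard jidctred.c macro):
 *
 *   #define FIX(x)  ((int32_t)((x) * (1 << CONST_BITS) + 0.5))
 *
 * e.g. FIX(1.847759065) == 15137 == FIX_1_847759065 above, and
 *      FIX(0.211164243) == 1730  == FIX_0_211164243.
 */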
  944. .balign 16
  945. Ljsimd_idct_4x4_neon_consts:
  946. .short FIX_1_847759065 /* v0.h[0] */
  947. .short -FIX_0_765366865 /* v0.h[1] */
  948. .short -FIX_0_211164243 /* v0.h[2] */
  949. .short FIX_1_451774981 /* v0.h[3] */
  950. .short -FIX_2_172734803 /* d1[0] */
  951. .short FIX_1_061594337 /* d1[1] */
  952. .short -FIX_0_509795579 /* d1[2] */
  953. .short -FIX_0_601344887 /* d1[3] */
  954. .short FIX_0_899976223 /* v2.h[0] */
  955. .short FIX_2_562915447 /* v2.h[1] */
  956. .short 1 << (CONST_BITS+1) /* v2.h[2] */
  957. .short 0 /* v2.h[3] */
  958. .macro idct_helper x4, x6, x8, x10, x12, x14, x16, shift, y26, y27, y28, y29
  959. smull v28.4s, \x4, v2.h[2]
  960. smlal v28.4s, \x8, v0.h[0]
  961. smlal v28.4s, \x14, v0.h[1]
  962. smull v26.4s, \x16, v1.h[2]
  963. smlal v26.4s, \x12, v1.h[3]
  964. smlal v26.4s, \x10, v2.h[0]
  965. smlal v26.4s, \x6, v2.h[1]
  966. smull v30.4s, \x4, v2.h[2]
  967. smlsl v30.4s, \x8, v0.h[0]
  968. smlsl v30.4s, \x14, v0.h[1]
  969. smull v24.4s, \x16, v0.h[2]
  970. smlal v24.4s, \x12, v0.h[3]
  971. smlal v24.4s, \x10, v1.h[0]
  972. smlal v24.4s, \x6, v1.h[1]
  973. add v20.4s, v28.4s, v26.4s
  974. sub v28.4s, v28.4s, v26.4s
  975. .if \shift > 16
  976. srshr v20.4s, v20.4s, #\shift
  977. srshr v28.4s, v28.4s, #\shift
  978. xtn \y26, v20.4s
  979. xtn \y29, v28.4s
  980. .else
  981. rshrn \y26, v20.4s, #\shift
  982. rshrn \y29, v28.4s, #\shift
  983. .endif
  984. add v20.4s, v30.4s, v24.4s
  985. sub v30.4s, v30.4s, v24.4s
  986. .if \shift > 16
  987. srshr v20.4s, v20.4s, #\shift
  988. srshr v30.4s, v30.4s, #\shift
  989. xtn \y27, v20.4s
  990. xtn \y28, v30.4s
  991. .else
  992. rshrn \y27, v20.4s, #\shift
  993. rshrn \y28, v30.4s, #\shift
  994. .endif
  995. .endm
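/* Note on the .if \shift > 16 branches above: when narrowing 32-bit lanes to
 * 16 bits, the rshrn immediate can only encode shifts of 1..16, so larger
 * descale amounts are done as srshr followed by xtn. Either way, each lane
 * gets the usual rounded descale (a C sketch, not part of the original source):
 *
 *   static int16_t descale(int32_t acc, int shift)   // acc: smull/smlal sum
 *   {
 *     return (int16_t)((acc + (1 << (shift - 1))) >> shift);
 *   }
 */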
  996. asm_function jsimd_idct_4x4_neon
  997. DCT_TABLE .req x0
  998. COEF_BLOCK .req x1
  999. OUTPUT_BUF .req x2
  1000. OUTPUT_COL .req x3
  1001. TMP1 .req x0
  1002. TMP2 .req x1
  1003. TMP3 .req x2
  1004. TMP4 .req x15
  1005. /* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  1006. guarantee that the upper (unused) 32 bits of x3 are valid. This
  1007. instruction ensures that those bits are set to zero. */
  1008. uxtw x3, w3
  1009. /* Save all used NEON registers */
  1010. sub sp, sp, 64
  1011. mov x9, sp
  1012. /* Load constants (v3.4h is just used for padding) */
  1013. adr TMP4, Ljsimd_idct_4x4_neon_consts
  1014. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1015. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1016. ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [TMP4]
  1017. /* Load all COEF_BLOCK into NEON registers with the following allocation:
  1018. * 0 1 2 3 | 4 5 6 7
  1019. * ---------+--------
  1020. * 0 | v4.4h | v5.4h
  1021. * 1 | v6.4h | v7.4h
  1022. * 2 | v8.4h | v9.4h
  1023. * 3 | v10.4h | v11.4h
  1024. * 4 | - | -
  1025. * 5 | v12.4h | v13.4h
  1026. * 6 | v14.4h | v15.4h
  1027. * 7 | v16.4h | v17.4h
  1028. */
  1029. ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [COEF_BLOCK], 32
  1030. ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [COEF_BLOCK], 32
  1031. add COEF_BLOCK, COEF_BLOCK, #16
  1032. ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [COEF_BLOCK], 32
  1033. ld1 {v16.4h, v17.4h}, [COEF_BLOCK], 16
  1034. /* dequantize */
  1035. ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [DCT_TABLE], 32
  1036. mul v4.4h, v4.4h, v18.4h
  1037. mul v5.4h, v5.4h, v19.4h
  1038. ins v4.d[1], v5.d[0] /* 128 bit q4 */
  1039. ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [DCT_TABLE], 32
  1040. mul v6.4h, v6.4h, v20.4h
  1041. mul v7.4h, v7.4h, v21.4h
  1042. ins v6.d[1], v7.d[0] /* 128 bit q6 */
  1043. mul v8.4h, v8.4h, v22.4h
  1044. mul v9.4h, v9.4h, v23.4h
  1045. ins v8.d[1], v9.d[0] /* 128 bit q8 */
  1046. add DCT_TABLE, DCT_TABLE, #16
  1047. ld1 {v26.4h, v27.4h, v28.4h, v29.4h}, [DCT_TABLE], 32
  1048. mul v10.4h, v10.4h, v24.4h
  1049. mul v11.4h, v11.4h, v25.4h
  1050. ins v10.d[1], v11.d[0] /* 128 bit q10 */
  1051. mul v12.4h, v12.4h, v26.4h
  1052. mul v13.4h, v13.4h, v27.4h
  1053. ins v12.d[1], v13.d[0] /* 128 bit q12 */
  1054. ld1 {v30.4h, v31.4h}, [DCT_TABLE], 16
  1055. mul v14.4h, v14.4h, v28.4h
  1056. mul v15.4h, v15.4h, v29.4h
  1057. ins v14.d[1], v15.d[0] /* 128 bit q14 */
  1058. mul v16.4h, v16.4h, v30.4h
  1059. mul v17.4h, v17.4h, v31.4h
  1060. ins v16.d[1], v17.d[0] /* 128 bit q16 */
  1061. /* Pass 1 */
  1062. idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v12.4h, v14.4h, v16.4h, 12, \
  1063. v4.4h, v6.4h, v8.4h, v10.4h
  1064. transpose_4x4 v4, v6, v8, v10, v3
  1065. ins v10.d[1], v11.d[0]
  1066. idct_helper v5.4h, v7.4h, v9.4h, v11.4h, v13.4h, v15.4h, v17.4h, 12, \
  1067. v5.4h, v7.4h, v9.4h, v11.4h
  1068. transpose_4x4 v5, v7, v9, v11, v3
  1069. ins v10.d[1], v11.d[0]
  1070. /* Pass 2 */
  1071. idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v7.4h, v9.4h, v11.4h, 19, \
  1072. v26.4h, v27.4h, v28.4h, v29.4h
  1073. transpose_4x4 v26, v27, v28, v29, v3
  1074. /* Range limit */
  1075. movi v30.8h, #0x80
  1076. ins v26.d[1], v27.d[0]
  1077. ins v28.d[1], v29.d[0]
  1078. add v26.8h, v26.8h, v30.8h
  1079. add v28.8h, v28.8h, v30.8h
  1080. sqxtun v26.8b, v26.8h
  1081. sqxtun v27.8b, v28.8h
  1082. /* Store results to the output buffer */
  1083. ldp TMP1, TMP2, [OUTPUT_BUF], 16
  1084. ldp TMP3, TMP4, [OUTPUT_BUF]
  1085. add TMP1, TMP1, OUTPUT_COL
  1086. add TMP2, TMP2, OUTPUT_COL
  1087. add TMP3, TMP3, OUTPUT_COL
  1088. add TMP4, TMP4, OUTPUT_COL
  1089. #if defined(__ARMEL__) && !RESPECT_STRICT_ALIGNMENT
/* We can use far fewer instructions on little-endian systems if the
 * OS kernel is not configured to trap unaligned memory accesses.
 */
  1093. st1 {v26.s}[0], [TMP1], 4
  1094. st1 {v27.s}[0], [TMP3], 4
  1095. st1 {v26.s}[1], [TMP2], 4
  1096. st1 {v27.s}[1], [TMP4], 4
  1097. #else
  1098. st1 {v26.b}[0], [TMP1], 1
  1099. st1 {v27.b}[0], [TMP3], 1
  1100. st1 {v26.b}[1], [TMP1], 1
  1101. st1 {v27.b}[1], [TMP3], 1
  1102. st1 {v26.b}[2], [TMP1], 1
  1103. st1 {v27.b}[2], [TMP3], 1
  1104. st1 {v26.b}[3], [TMP1], 1
  1105. st1 {v27.b}[3], [TMP3], 1
  1106. st1 {v26.b}[4], [TMP2], 1
  1107. st1 {v27.b}[4], [TMP4], 1
  1108. st1 {v26.b}[5], [TMP2], 1
  1109. st1 {v27.b}[5], [TMP4], 1
  1110. st1 {v26.b}[6], [TMP2], 1
  1111. st1 {v27.b}[6], [TMP4], 1
  1112. st1 {v26.b}[7], [TMP2], 1
  1113. st1 {v27.b}[7], [TMP4], 1
  1114. #endif
  1115. /* vpop {v8.4h - v15.4h} ;not available */
  1116. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1117. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1118. blr x30
  1119. .unreq DCT_TABLE
  1120. .unreq COEF_BLOCK
  1121. .unreq OUTPUT_BUF
  1122. .unreq OUTPUT_COL
  1123. .unreq TMP1
  1124. .unreq TMP2
  1125. .unreq TMP3
  1126. .unreq TMP4
  1127. .purgem idct_helper
  1128. /*****************************************************************************/
/*
 * jsimd_idct_2x2_neon
 *
 * This function contains inverse-DCT code for producing reduced-size
 * 2x2-pixel output from an 8x8 DCT block. It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_idct_2x2'
 * function from jpeg-6b (jidctred.c).
 *
 * NOTE: jpeg-8 has an improved implementation of the 2x2 inverse DCT, which
 * requires far fewer arithmetic operations and hence should be faster.
 * The primary purpose of this particular NEON-optimized function is
 * bit-exact compatibility with jpeg-6b.
 */
  1142. .balign 8
  1143. Ljsimd_idct_2x2_neon_consts:
  1144. .short -FIX_0_720959822 /* v14[0] */
  1145. .short FIX_0_850430095 /* v14[1] */
  1146. .short -FIX_1_272758580 /* v14[2] */
  1147. .short FIX_3_624509785 /* v14[3] */
  1148. .macro idct_helper x4, x6, x10, x12, x16, shift, y26, y27
  1149. sshll v15.4s, \x4, #15
  1150. smull v26.4s, \x6, v14.h[3]
  1151. smlal v26.4s, \x10, v14.h[2]
  1152. smlal v26.4s, \x12, v14.h[1]
  1153. smlal v26.4s, \x16, v14.h[0]
  1154. add v20.4s, v15.4s, v26.4s
  1155. sub v15.4s, v15.4s, v26.4s
  1156. .if \shift > 16
  1157. srshr v20.4s, v20.4s, #\shift
  1158. srshr v15.4s, v15.4s, #\shift
  1159. xtn \y26, v20.4s
  1160. xtn \y27, v15.4s
  1161. .else
  1162. rshrn \y26, v20.4s, #\shift
  1163. rshrn \y27, v15.4s, #\shift
  1164. .endif
  1165. .endm
  1166. asm_function jsimd_idct_2x2_neon
  1167. DCT_TABLE .req x0
  1168. COEF_BLOCK .req x1
  1169. OUTPUT_BUF .req x2
  1170. OUTPUT_COL .req x3
  1171. TMP1 .req x0
  1172. TMP2 .req x15
  1173. /* OUTPUT_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  1174. guarantee that the upper (unused) 32 bits of x3 are valid. This
  1175. instruction ensures that those bits are set to zero. */
  1176. uxtw x3, w3
  1177. /* vpush {v8.4h - v15.4h} ; not available */
  1178. sub sp, sp, 64
  1179. mov x9, sp
  1180. /* Load constants */
  1181. adr TMP2, Ljsimd_idct_2x2_neon_consts
  1182. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1183. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1184. ld1 {v14.4h}, [TMP2]
  1185. /* Load all COEF_BLOCK into NEON registers with the following allocation:
  1186. * 0 1 2 3 | 4 5 6 7
  1187. * ---------+--------
  1188. * 0 | v4.4h | v5.4h
  1189. * 1 | v6.4h | v7.4h
  1190. * 2 | - | -
  1191. * 3 | v10.4h | v11.4h
  1192. * 4 | - | -
  1193. * 5 | v12.4h | v13.4h
  1194. * 6 | - | -
  1195. * 7 | v16.4h | v17.4h
  1196. */
  1197. ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [COEF_BLOCK], 32
  1198. add COEF_BLOCK, COEF_BLOCK, #16
  1199. ld1 {v10.4h, v11.4h}, [COEF_BLOCK], 16
  1200. add COEF_BLOCK, COEF_BLOCK, #16
  1201. ld1 {v12.4h, v13.4h}, [COEF_BLOCK], 16
  1202. add COEF_BLOCK, COEF_BLOCK, #16
  1203. ld1 {v16.4h, v17.4h}, [COEF_BLOCK], 16
  1204. /* Dequantize */
  1205. ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [DCT_TABLE], 32
  1206. mul v4.4h, v4.4h, v18.4h
  1207. mul v5.4h, v5.4h, v19.4h
  1208. ins v4.d[1], v5.d[0]
  1209. mul v6.4h, v6.4h, v20.4h
  1210. mul v7.4h, v7.4h, v21.4h
  1211. ins v6.d[1], v7.d[0]
  1212. add DCT_TABLE, DCT_TABLE, #16
  1213. ld1 {v24.4h, v25.4h}, [DCT_TABLE], 16
  1214. mul v10.4h, v10.4h, v24.4h
  1215. mul v11.4h, v11.4h, v25.4h
  1216. ins v10.d[1], v11.d[0]
  1217. add DCT_TABLE, DCT_TABLE, #16
  1218. ld1 {v26.4h, v27.4h}, [DCT_TABLE], 16
  1219. mul v12.4h, v12.4h, v26.4h
  1220. mul v13.4h, v13.4h, v27.4h
  1221. ins v12.d[1], v13.d[0]
  1222. add DCT_TABLE, DCT_TABLE, #16
  1223. ld1 {v30.4h, v31.4h}, [DCT_TABLE], 16
  1224. mul v16.4h, v16.4h, v30.4h
  1225. mul v17.4h, v17.4h, v31.4h
  1226. ins v16.d[1], v17.d[0]
  1227. /* Pass 1 */
  1228. #if 0
  1229. idct_helper v4.4h, v6.4h, v10.4h, v12.4h, v16.4h, 13, v4.4h, v6.4h
  1230. transpose_4x4 v4.4h, v6.4h, v8.4h, v10.4h
  1231. idct_helper v5.4h, v7.4h, v11.4h, v13.4h, v17.4h, 13, v5.4h, v7.4h
  1232. transpose_4x4 v5.4h, v7.4h, v9.4h, v11.4h
  1233. #else
  1234. smull v26.4s, v6.4h, v14.h[3]
  1235. smlal v26.4s, v10.4h, v14.h[2]
  1236. smlal v26.4s, v12.4h, v14.h[1]
  1237. smlal v26.4s, v16.4h, v14.h[0]
  1238. smull v24.4s, v7.4h, v14.h[3]
  1239. smlal v24.4s, v11.4h, v14.h[2]
  1240. smlal v24.4s, v13.4h, v14.h[1]
  1241. smlal v24.4s, v17.4h, v14.h[0]
  1242. sshll v15.4s, v4.4h, #15
  1243. sshll v30.4s, v5.4h, #15
  1244. add v20.4s, v15.4s, v26.4s
  1245. sub v15.4s, v15.4s, v26.4s
  1246. rshrn v4.4h, v20.4s, #13
  1247. rshrn v6.4h, v15.4s, #13
  1248. add v20.4s, v30.4s, v24.4s
  1249. sub v15.4s, v30.4s, v24.4s
  1250. rshrn v5.4h, v20.4s, #13
  1251. rshrn v7.4h, v15.4s, #13
  1252. ins v4.d[1], v5.d[0]
  1253. ins v6.d[1], v7.d[0]
  1254. transpose v4, v6, v3, .16b, .8h
  1255. transpose v6, v10, v3, .16b, .4s
  1256. ins v11.d[0], v10.d[1]
  1257. ins v7.d[0], v6.d[1]
  1258. #endif
  1259. /* Pass 2 */
  1260. idct_helper v4.4h, v6.4h, v10.4h, v7.4h, v11.4h, 20, v26.4h, v27.4h
  1261. /* Range limit */
  1262. movi v30.8h, #0x80
  1263. ins v26.d[1], v27.d[0]
  1264. add v26.8h, v26.8h, v30.8h
  1265. sqxtun v30.8b, v26.8h
  1266. ins v26.d[0], v30.d[0]
  1267. sqxtun v27.8b, v26.8h
  1268. /* Store results to the output buffer */
  1269. ldp TMP1, TMP2, [OUTPUT_BUF]
  1270. add TMP1, TMP1, OUTPUT_COL
  1271. add TMP2, TMP2, OUTPUT_COL
  1272. st1 {v26.b}[0], [TMP1], 1
  1273. st1 {v27.b}[4], [TMP1], 1
  1274. st1 {v26.b}[1], [TMP2], 1
  1275. st1 {v27.b}[5], [TMP2], 1
  1276. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1277. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1278. blr x30
  1279. .unreq DCT_TABLE
  1280. .unreq COEF_BLOCK
  1281. .unreq OUTPUT_BUF
  1282. .unreq OUTPUT_COL
  1283. .unreq TMP1
  1284. .unreq TMP2
  1285. .purgem idct_helper
  1286. /*****************************************************************************/
  1287. /*
  1288. * jsimd_ycc_extrgb_convert_neon
  1289. * jsimd_ycc_extbgr_convert_neon
  1290. * jsimd_ycc_extrgbx_convert_neon
  1291. * jsimd_ycc_extbgrx_convert_neon
  1292. * jsimd_ycc_extxbgr_convert_neon
  1293. * jsimd_ycc_extxrgb_convert_neon
  1294. *
  1295. * Colorspace conversion YCbCr -> RGB
  1296. */
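/* For reference, the coefficients loaded by these functions implement the
 * usual JFIF/libjpeg YCbCr -> RGB equations (as in jdcolor.c). A hedged
 * floating-point sketch of what the Q14/Q15 fixed-point code computes (the
 * exact integer rounding differs slightly):
 *
 *   R = Y                        + 1.40200 * (Cr - 128)
 *   G = Y - 0.34414 * (Cb - 128) - 0.71414 * (Cr - 128)
 *   B = Y + 1.77200 * (Cb - 128)
 *
 * with 22971 ~= 1.40200 * 2^14, -11277 ~= -0.34414 * 2^15,
 *      -23401 ~= -0.71414 * 2^15 and 29033 ~= 1.77200 * 2^14.
 */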
  1297. .macro do_load size
  1298. .if \size == 8
  1299. ld1 {v4.8b}, [U], 8
  1300. ld1 {v5.8b}, [V], 8
  1301. ld1 {v0.8b}, [Y], 8
  1302. prfm pldl1keep, [U, #64]
  1303. prfm pldl1keep, [V, #64]
  1304. prfm pldl1keep, [Y, #64]
  1305. .elseif \size == 4
  1306. ld1 {v4.b}[0], [U], 1
  1307. ld1 {v4.b}[1], [U], 1
  1308. ld1 {v4.b}[2], [U], 1
  1309. ld1 {v4.b}[3], [U], 1
  1310. ld1 {v5.b}[0], [V], 1
  1311. ld1 {v5.b}[1], [V], 1
  1312. ld1 {v5.b}[2], [V], 1
  1313. ld1 {v5.b}[3], [V], 1
  1314. ld1 {v0.b}[0], [Y], 1
  1315. ld1 {v0.b}[1], [Y], 1
  1316. ld1 {v0.b}[2], [Y], 1
  1317. ld1 {v0.b}[3], [Y], 1
  1318. .elseif \size == 2
  1319. ld1 {v4.b}[4], [U], 1
  1320. ld1 {v4.b}[5], [U], 1
  1321. ld1 {v5.b}[4], [V], 1
  1322. ld1 {v5.b}[5], [V], 1
  1323. ld1 {v0.b}[4], [Y], 1
  1324. ld1 {v0.b}[5], [Y], 1
  1325. .elseif \size == 1
  1326. ld1 {v4.b}[6], [U], 1
  1327. ld1 {v5.b}[6], [V], 1
  1328. ld1 {v0.b}[6], [Y], 1
  1329. .else
  1330. .error unsupported macroblock size
  1331. .endif
  1332. .endm
  1333. .macro do_store bpp, size, fast_st3
  1334. .if \bpp == 24
  1335. .if \size == 8
  1336. .if \fast_st3 == 1
  1337. st3 {v10.8b, v11.8b, v12.8b}, [RGB], 24
  1338. .else
  1339. st1 {v10.b}[0], [RGB], #1
  1340. st1 {v11.b}[0], [RGB], #1
  1341. st1 {v12.b}[0], [RGB], #1
  1342. st1 {v10.b}[1], [RGB], #1
  1343. st1 {v11.b}[1], [RGB], #1
  1344. st1 {v12.b}[1], [RGB], #1
  1345. st1 {v10.b}[2], [RGB], #1
  1346. st1 {v11.b}[2], [RGB], #1
  1347. st1 {v12.b}[2], [RGB], #1
  1348. st1 {v10.b}[3], [RGB], #1
  1349. st1 {v11.b}[3], [RGB], #1
  1350. st1 {v12.b}[3], [RGB], #1
  1351. st1 {v10.b}[4], [RGB], #1
  1352. st1 {v11.b}[4], [RGB], #1
  1353. st1 {v12.b}[4], [RGB], #1
  1354. st1 {v10.b}[5], [RGB], #1
  1355. st1 {v11.b}[5], [RGB], #1
  1356. st1 {v12.b}[5], [RGB], #1
  1357. st1 {v10.b}[6], [RGB], #1
  1358. st1 {v11.b}[6], [RGB], #1
  1359. st1 {v12.b}[6], [RGB], #1
  1360. st1 {v10.b}[7], [RGB], #1
  1361. st1 {v11.b}[7], [RGB], #1
  1362. st1 {v12.b}[7], [RGB], #1
  1363. .endif
  1364. .elseif \size == 4
  1365. st3 {v10.b, v11.b, v12.b}[0], [RGB], 3
  1366. st3 {v10.b, v11.b, v12.b}[1], [RGB], 3
  1367. st3 {v10.b, v11.b, v12.b}[2], [RGB], 3
  1368. st3 {v10.b, v11.b, v12.b}[3], [RGB], 3
  1369. .elseif \size == 2
  1370. st3 {v10.b, v11.b, v12.b}[4], [RGB], 3
  1371. st3 {v10.b, v11.b, v12.b}[5], [RGB], 3
  1372. .elseif \size == 1
  1373. st3 {v10.b, v11.b, v12.b}[6], [RGB], 3
  1374. .else
  1375. .error unsupported macroblock size
  1376. .endif
  1377. .elseif \bpp == 32
  1378. .if \size == 8
  1379. st4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], 32
  1380. .elseif \size == 4
  1381. st4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], 4
  1382. st4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], 4
  1383. st4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], 4
  1384. st4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], 4
  1385. .elseif \size == 2
  1386. st4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], 4
  1387. st4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], 4
  1388. .elseif \size == 1
  1389. st4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], 4
  1390. .else
  1391. .error unsupported macroblock size
  1392. .endif
  1393. .elseif \bpp==16
  1394. .if \size == 8
  1395. st1 {v25.8h}, [RGB], 16
  1396. .elseif \size == 4
  1397. st1 {v25.4h}, [RGB], 8
  1398. .elseif \size == 2
  1399. st1 {v25.h}[4], [RGB], 2
  1400. st1 {v25.h}[5], [RGB], 2
  1401. .elseif \size == 1
  1402. st1 {v25.h}[6], [RGB], 2
  1403. .else
  1404. .error unsupported macroblock size
  1405. .endif
  1406. .else
  1407. .error unsupported bpp
  1408. .endif
  1409. .endm
  1410. .macro generate_jsimd_ycc_rgb_convert_neon colorid, bpp, r_offs, rsize, \
  1411. g_offs, gsize, b_offs, bsize, \
  1412. defsize, fast_st3
  1413. /*
  1414. * 2-stage pipelined YCbCr->RGB conversion
  1415. */
  1416. .macro do_yuv_to_rgb_stage1
uaddw v6.8h, v2.8h, v4.8b /* v6.8h = u - 128 */
uaddw v8.8h, v2.8h, v5.8b /* v8.8h = v - 128 */
  1419. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  1420. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  1421. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  1422. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  1423. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  1424. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  1425. smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
  1426. smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
  1427. .endm
  1428. .macro do_yuv_to_rgb_stage2
  1429. rshrn v20.4h, v20.4s, #15
  1430. rshrn2 v20.8h, v22.4s, #15
  1431. rshrn v24.4h, v24.4s, #14
  1432. rshrn2 v24.8h, v26.4s, #14
  1433. rshrn v28.4h, v28.4s, #14
  1434. rshrn2 v28.8h, v30.4s, #14
  1435. uaddw v20.8h, v20.8h, v0.8b
  1436. uaddw v24.8h, v24.8h, v0.8b
  1437. uaddw v28.8h, v28.8h, v0.8b
  1438. .if \bpp != 16
  1439. sqxtun v1\g_offs\defsize, v20.8h
  1440. sqxtun v1\r_offs\defsize, v24.8h
  1441. sqxtun v1\b_offs\defsize, v28.8h
  1442. .else
  1443. sqshlu v21.8h, v20.8h, #8
  1444. sqshlu v25.8h, v24.8h, #8
  1445. sqshlu v29.8h, v28.8h, #8
  1446. sri v25.8h, v21.8h, #5
  1447. sri v25.8h, v29.8h, #11
  1448. .endif
  1449. .endm
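/* The \bpp == 16 path above packs each pixel into RGB565: sqshlu #8 places
 * the saturated 8-bit channel in the top byte of a 16-bit lane, then the two
 * sri instructions insert green below red and blue into the low 5 bits.
 * Equivalent scalar C (a sketch, not part of the original source):
 *
 *   #include <stdint.h>
 *
 *   static uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
 *   {
 *     return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
 *   }
 */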
  1450. .macro do_yuv_to_rgb_stage2_store_load_stage1 fast_st3
  1451. rshrn v20.4h, v20.4s, #15
  1452. rshrn v24.4h, v24.4s, #14
  1453. rshrn v28.4h, v28.4s, #14
  1454. ld1 {v4.8b}, [U], 8
  1455. rshrn2 v20.8h, v22.4s, #15
  1456. rshrn2 v24.8h, v26.4s, #14
  1457. rshrn2 v28.8h, v30.4s, #14
  1458. ld1 {v5.8b}, [V], 8
  1459. uaddw v20.8h, v20.8h, v0.8b
  1460. uaddw v24.8h, v24.8h, v0.8b
  1461. uaddw v28.8h, v28.8h, v0.8b
  1462. .if \bpp != 16 /**************** rgb24/rgb32 ******************************/
  1463. sqxtun v1\g_offs\defsize, v20.8h
  1464. ld1 {v0.8b}, [Y], 8
  1465. sqxtun v1\r_offs\defsize, v24.8h
  1466. prfm pldl1keep, [U, #64]
  1467. prfm pldl1keep, [V, #64]
  1468. prfm pldl1keep, [Y, #64]
  1469. sqxtun v1\b_offs\defsize, v28.8h
uaddw v6.8h, v2.8h, v4.8b /* v6.8h = u - 128 */
uaddw v8.8h, v2.8h, v5.8b /* v8.8h = v - 128 */
  1472. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  1473. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  1474. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  1475. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  1476. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  1477. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  1478. .else /**************************** rgb565 ********************************/
  1479. sqshlu v21.8h, v20.8h, #8
  1480. sqshlu v25.8h, v24.8h, #8
  1481. sqshlu v29.8h, v28.8h, #8
uaddw v6.8h, v2.8h, v4.8b /* v6.8h = u - 128 */
uaddw v8.8h, v2.8h, v5.8b /* v8.8h = v - 128 */
  1484. ld1 {v0.8b}, [Y], 8
  1485. smull v20.4s, v6.4h, v1.h[1] /* multiply by -11277 */
  1486. smlal v20.4s, v8.4h, v1.h[2] /* multiply by -23401 */
  1487. smull2 v22.4s, v6.8h, v1.h[1] /* multiply by -11277 */
  1488. smlal2 v22.4s, v8.8h, v1.h[2] /* multiply by -23401 */
  1489. sri v25.8h, v21.8h, #5
  1490. smull v24.4s, v8.4h, v1.h[0] /* multiply by 22971 */
  1491. smull2 v26.4s, v8.8h, v1.h[0] /* multiply by 22971 */
  1492. prfm pldl1keep, [U, #64]
  1493. prfm pldl1keep, [V, #64]
  1494. prfm pldl1keep, [Y, #64]
  1495. sri v25.8h, v29.8h, #11
  1496. .endif
  1497. do_store \bpp, 8, \fast_st3
  1498. smull v28.4s, v6.4h, v1.h[3] /* multiply by 29033 */
  1499. smull2 v30.4s, v6.8h, v1.h[3] /* multiply by 29033 */
  1500. .endm
  1501. .macro do_yuv_to_rgb
  1502. do_yuv_to_rgb_stage1
  1503. do_yuv_to_rgb_stage2
  1504. .endm
  1505. /* Apple gas crashes on adrl, work around that by using adr.
  1506. * But this requires a copy of these constants for each function.
  1507. */
  1508. .balign 16
  1509. .if \fast_st3 == 1
  1510. Ljsimd_ycc_\colorid\()_neon_consts:
  1511. .else
  1512. Ljsimd_ycc_\colorid\()_neon_slowst3_consts:
  1513. .endif
  1514. .short 0, 0, 0, 0
  1515. .short 22971, -11277, -23401, 29033
  1516. .short -128, -128, -128, -128
  1517. .short -128, -128, -128, -128
  1518. .if \fast_st3 == 1
  1519. asm_function jsimd_ycc_\colorid\()_convert_neon
  1520. .else
  1521. asm_function jsimd_ycc_\colorid\()_convert_neon_slowst3
  1522. .endif
  1523. OUTPUT_WIDTH .req w0
  1524. INPUT_BUF .req x1
  1525. INPUT_ROW .req w2
  1526. OUTPUT_BUF .req x3
  1527. NUM_ROWS .req w4
  1528. INPUT_BUF0 .req x5
  1529. INPUT_BUF1 .req x6
  1530. INPUT_BUF2 .req x1
  1531. RGB .req x7
  1532. Y .req x9
  1533. U .req x10
  1534. V .req x11
  1535. N .req w15
  1536. sub sp, sp, 64
  1537. mov x9, sp
/* Load constants into v1.4h and v2.8h (v0.4h is just used for padding) */
  1539. .if \fast_st3 == 1
  1540. adr x15, Ljsimd_ycc_\colorid\()_neon_consts
  1541. .else
  1542. adr x15, Ljsimd_ycc_\colorid\()_neon_slowst3_consts
  1543. .endif
  1544. /* Save NEON registers */
  1545. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1546. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1547. ld1 {v0.4h, v1.4h}, [x15], 16
  1548. ld1 {v2.8h}, [x15]
  1549. ldr INPUT_BUF0, [INPUT_BUF]
  1550. ldr INPUT_BUF1, [INPUT_BUF, #8]
  1551. ldr INPUT_BUF2, [INPUT_BUF, #16]
  1552. .unreq INPUT_BUF
/* Initially set v10 and v13 to 0xFF (the alpha channel for the 32-bit pixel formats) */
  1554. movi v10.16b, #255
  1555. movi v13.16b, #255
  1556. /* Outer loop over scanlines */
  1557. cmp NUM_ROWS, #1
  1558. b.lt 9f
  1559. 0:
  1560. ldr Y, [INPUT_BUF0, INPUT_ROW, uxtw #3]
  1561. ldr U, [INPUT_BUF1, INPUT_ROW, uxtw #3]
  1562. mov N, OUTPUT_WIDTH
  1563. ldr V, [INPUT_BUF2, INPUT_ROW, uxtw #3]
  1564. add INPUT_ROW, INPUT_ROW, #1
  1565. ldr RGB, [OUTPUT_BUF], #8
  1566. /* Inner loop over pixels */
  1567. subs N, N, #8
  1568. b.lt 3f
  1569. do_load 8
  1570. do_yuv_to_rgb_stage1
  1571. subs N, N, #8
  1572. b.lt 2f
  1573. 1:
  1574. do_yuv_to_rgb_stage2_store_load_stage1 \fast_st3
  1575. subs N, N, #8
  1576. b.ge 1b
  1577. 2:
  1578. do_yuv_to_rgb_stage2
  1579. do_store \bpp, 8, \fast_st3
  1580. tst N, #7
  1581. b.eq 8f
  1582. 3:
  1583. tst N, #4
  1584. b.eq 3f
  1585. do_load 4
  1586. 3:
  1587. tst N, #2
  1588. b.eq 4f
  1589. do_load 2
  1590. 4:
  1591. tst N, #1
  1592. b.eq 5f
  1593. do_load 1
  1594. 5:
  1595. do_yuv_to_rgb
  1596. tst N, #4
  1597. b.eq 6f
  1598. do_store \bpp, 4, \fast_st3
  1599. 6:
  1600. tst N, #2
  1601. b.eq 7f
  1602. do_store \bpp, 2, \fast_st3
  1603. 7:
  1604. tst N, #1
  1605. b.eq 8f
  1606. do_store \bpp, 1, \fast_st3
  1607. 8:
  1608. subs NUM_ROWS, NUM_ROWS, #1
  1609. b.gt 0b
  1610. 9:
  1611. /* Restore all registers and return */
  1612. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1613. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1614. br x30
  1615. .unreq OUTPUT_WIDTH
  1616. .unreq INPUT_ROW
  1617. .unreq OUTPUT_BUF
  1618. .unreq NUM_ROWS
  1619. .unreq INPUT_BUF0
  1620. .unreq INPUT_BUF1
  1621. .unreq INPUT_BUF2
  1622. .unreq RGB
  1623. .unreq Y
  1624. .unreq U
  1625. .unreq V
  1626. .unreq N
  1627. .purgem do_yuv_to_rgb
  1628. .purgem do_yuv_to_rgb_stage1
  1629. .purgem do_yuv_to_rgb_stage2
  1630. .purgem do_yuv_to_rgb_stage2_store_load_stage1
  1631. .endm
  1632. /*--------------------------------- id ----- bpp R rsize G gsize B bsize defsize fast_st3*/
  1633. generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 1
  1634. generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 1
  1635. generate_jsimd_ycc_rgb_convert_neon extrgbx, 32, 0, .4h, 1, .4h, 2, .4h, .8b, 1
  1636. generate_jsimd_ycc_rgb_convert_neon extbgrx, 32, 2, .4h, 1, .4h, 0, .4h, .8b, 1
  1637. generate_jsimd_ycc_rgb_convert_neon extxbgr, 32, 3, .4h, 2, .4h, 1, .4h, .8b, 1
  1638. generate_jsimd_ycc_rgb_convert_neon extxrgb, 32, 1, .4h, 2, .4h, 3, .4h, .8b, 1
  1639. generate_jsimd_ycc_rgb_convert_neon rgb565, 16, 0, .4h, 0, .4h, 0, .4h, .8b, 1
  1640. generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b, 0
  1641. generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b, 0
  1642. .purgem do_load
  1643. .purgem do_store
  1644. /*****************************************************************************/
  1645. /*
  1646. * jsimd_extrgb_ycc_convert_neon
  1647. * jsimd_extbgr_ycc_convert_neon
  1648. * jsimd_extrgbx_ycc_convert_neon
  1649. * jsimd_extbgrx_ycc_convert_neon
  1650. * jsimd_extxbgr_ycc_convert_neon
  1651. * jsimd_extxrgb_ycc_convert_neon
  1652. *
  1653. * Colorspace conversion RGB -> YCbCr
  1654. */
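/* For reference, the Q16 coefficients in the constant tables below implement
 * the usual libjpeg RGB -> YCbCr equations (as in jccolor.c). A hedged
 * floating-point sketch (the exact integer rounding differs slightly):
 *
 *   Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
 *   Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128
 *   Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + 128
 *
 * e.g. 19595 ~= 0.29900 * 65536, 38470 ~= 0.58700 * 65536 and
 *      32768 == 0.50000 * 65536.
 */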
  1655. .macro do_store size
  1656. .if \size == 8
  1657. st1 {v20.8b}, [Y], #8
  1658. st1 {v21.8b}, [U], #8
  1659. st1 {v22.8b}, [V], #8
  1660. .elseif \size == 4
  1661. st1 {v20.b}[0], [Y], #1
  1662. st1 {v20.b}[1], [Y], #1
  1663. st1 {v20.b}[2], [Y], #1
  1664. st1 {v20.b}[3], [Y], #1
  1665. st1 {v21.b}[0], [U], #1
  1666. st1 {v21.b}[1], [U], #1
  1667. st1 {v21.b}[2], [U], #1
  1668. st1 {v21.b}[3], [U], #1
  1669. st1 {v22.b}[0], [V], #1
  1670. st1 {v22.b}[1], [V], #1
  1671. st1 {v22.b}[2], [V], #1
  1672. st1 {v22.b}[3], [V], #1
  1673. .elseif \size == 2
  1674. st1 {v20.b}[4], [Y], #1
  1675. st1 {v20.b}[5], [Y], #1
  1676. st1 {v21.b}[4], [U], #1
  1677. st1 {v21.b}[5], [U], #1
  1678. st1 {v22.b}[4], [V], #1
  1679. st1 {v22.b}[5], [V], #1
  1680. .elseif \size == 1
  1681. st1 {v20.b}[6], [Y], #1
  1682. st1 {v21.b}[6], [U], #1
  1683. st1 {v22.b}[6], [V], #1
  1684. .else
  1685. .error unsupported macroblock size
  1686. .endif
  1687. .endm
  1688. .macro do_load bpp, size, fast_ld3
  1689. .if \bpp == 24
  1690. .if \size == 8
  1691. .if \fast_ld3 == 1
  1692. ld3 {v10.8b, v11.8b, v12.8b}, [RGB], #24
  1693. .else
  1694. ld1 {v10.b}[0], [RGB], #1
  1695. ld1 {v11.b}[0], [RGB], #1
  1696. ld1 {v12.b}[0], [RGB], #1
  1697. ld1 {v10.b}[1], [RGB], #1
  1698. ld1 {v11.b}[1], [RGB], #1
  1699. ld1 {v12.b}[1], [RGB], #1
  1700. ld1 {v10.b}[2], [RGB], #1
  1701. ld1 {v11.b}[2], [RGB], #1
  1702. ld1 {v12.b}[2], [RGB], #1
  1703. ld1 {v10.b}[3], [RGB], #1
  1704. ld1 {v11.b}[3], [RGB], #1
  1705. ld1 {v12.b}[3], [RGB], #1
  1706. ld1 {v10.b}[4], [RGB], #1
  1707. ld1 {v11.b}[4], [RGB], #1
  1708. ld1 {v12.b}[4], [RGB], #1
  1709. ld1 {v10.b}[5], [RGB], #1
  1710. ld1 {v11.b}[5], [RGB], #1
  1711. ld1 {v12.b}[5], [RGB], #1
  1712. ld1 {v10.b}[6], [RGB], #1
  1713. ld1 {v11.b}[6], [RGB], #1
  1714. ld1 {v12.b}[6], [RGB], #1
  1715. ld1 {v10.b}[7], [RGB], #1
  1716. ld1 {v11.b}[7], [RGB], #1
  1717. ld1 {v12.b}[7], [RGB], #1
  1718. .endif
  1719. prfm pldl1keep, [RGB, #128]
  1720. .elseif \size == 4
  1721. ld3 {v10.b, v11.b, v12.b}[0], [RGB], #3
  1722. ld3 {v10.b, v11.b, v12.b}[1], [RGB], #3
  1723. ld3 {v10.b, v11.b, v12.b}[2], [RGB], #3
  1724. ld3 {v10.b, v11.b, v12.b}[3], [RGB], #3
  1725. .elseif \size == 2
  1726. ld3 {v10.b, v11.b, v12.b}[4], [RGB], #3
  1727. ld3 {v10.b, v11.b, v12.b}[5], [RGB], #3
  1728. .elseif \size == 1
  1729. ld3 {v10.b, v11.b, v12.b}[6], [RGB], #3
  1730. .else
  1731. .error unsupported macroblock size
  1732. .endif
  1733. .elseif \bpp == 32
  1734. .if \size == 8
  1735. ld4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], #32
  1736. prfm pldl1keep, [RGB, #128]
  1737. .elseif \size == 4
  1738. ld4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], #4
  1739. ld4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], #4
  1740. ld4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], #4
  1741. ld4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], #4
  1742. .elseif \size == 2
  1743. ld4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], #4
  1744. ld4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], #4
  1745. .elseif \size == 1
  1746. ld4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], #4
  1747. .else
  1748. .error unsupported macroblock size
  1749. .endif
  1750. .else
  1751. .error unsupported bpp
  1752. .endif
  1753. .endm
  1754. .macro generate_jsimd_rgb_ycc_convert_neon colorid, bpp, r_offs, g_offs, \
  1755. b_offs, fast_ld3
  1756. /*
  1757. * 2-stage pipelined RGB->YCbCr conversion
  1758. */
  1759. .macro do_rgb_to_yuv_stage1
  1760. ushll v4.8h, v1\r_offs\().8b, #0 /* r = v4 */
  1761. ushll v6.8h, v1\g_offs\().8b, #0 /* g = v6 */
  1762. ushll v8.8h, v1\b_offs\().8b, #0 /* b = v8 */
  1763. rev64 v18.4s, v1.4s
  1764. rev64 v26.4s, v1.4s
  1765. rev64 v28.4s, v1.4s
  1766. rev64 v30.4s, v1.4s
  1767. umull v14.4s, v4.4h, v0.h[0]
  1768. umull2 v16.4s, v4.8h, v0.h[0]
  1769. umlsl v18.4s, v4.4h, v0.h[3]
  1770. umlsl2 v26.4s, v4.8h, v0.h[3]
  1771. umlal v28.4s, v4.4h, v0.h[5]
  1772. umlal2 v30.4s, v4.8h, v0.h[5]
  1773. umlal v14.4s, v6.4h, v0.h[1]
  1774. umlal2 v16.4s, v6.8h, v0.h[1]
  1775. umlsl v18.4s, v6.4h, v0.h[4]
  1776. umlsl2 v26.4s, v6.8h, v0.h[4]
  1777. umlsl v28.4s, v6.4h, v0.h[6]
  1778. umlsl2 v30.4s, v6.8h, v0.h[6]
  1779. umlal v14.4s, v8.4h, v0.h[2]
  1780. umlal2 v16.4s, v8.8h, v0.h[2]
  1781. umlal v18.4s, v8.4h, v0.h[5]
  1782. umlal2 v26.4s, v8.8h, v0.h[5]
  1783. umlsl v28.4s, v8.4h, v0.h[7]
  1784. umlsl2 v30.4s, v8.8h, v0.h[7]
  1785. .endm
  1786. .macro do_rgb_to_yuv_stage2
  1787. rshrn v20.4h, v14.4s, #16
  1788. shrn v22.4h, v18.4s, #16
  1789. shrn v24.4h, v28.4s, #16
  1790. rshrn2 v20.8h, v16.4s, #16
  1791. shrn2 v22.8h, v26.4s, #16
  1792. shrn2 v24.8h, v30.4s, #16
  1793. xtn v20.8b, v20.8h /* v20 = y */
  1794. xtn v21.8b, v22.8h /* v21 = u */
  1795. xtn v22.8b, v24.8h /* v22 = v */
  1796. .endm
  1797. .macro do_rgb_to_yuv
  1798. do_rgb_to_yuv_stage1
  1799. do_rgb_to_yuv_stage2
  1800. .endm
/* TODO: expand the macros and interleave instructions if some in-order
 * ARM64 processor can actually dual-issue LOAD/STORE with ALU operations */
  1803. .macro do_rgb_to_yuv_stage2_store_load_stage1 fast_ld3
  1804. do_rgb_to_yuv_stage2
  1805. do_load \bpp, 8, \fast_ld3
  1806. st1 {v20.8b}, [Y], #8
  1807. st1 {v21.8b}, [U], #8
  1808. st1 {v22.8b}, [V], #8
  1809. do_rgb_to_yuv_stage1
  1810. .endm
  1811. .balign 16
  1812. .if \fast_ld3 == 1
  1813. Ljsimd_\colorid\()_ycc_neon_consts:
  1814. .else
  1815. Ljsimd_\colorid\()_ycc_neon_slowld3_consts:
  1816. .endif
  1817. .short 19595, 38470, 7471, 11059
  1818. .short 21709, 32768, 27439, 5329
  1819. .short 32767, 128, 32767, 128
  1820. .short 32767, 128, 32767, 128
  1821. .if \fast_ld3 == 1
  1822. asm_function jsimd_\colorid\()_ycc_convert_neon
  1823. .else
  1824. asm_function jsimd_\colorid\()_ycc_convert_neon_slowld3
  1825. .endif
  1826. OUTPUT_WIDTH .req w0
  1827. INPUT_BUF .req x1
  1828. OUTPUT_BUF .req x2
  1829. OUTPUT_ROW .req w3
  1830. NUM_ROWS .req w4
  1831. OUTPUT_BUF0 .req x5
  1832. OUTPUT_BUF1 .req x6
  1833. OUTPUT_BUF2 .req x2 /* OUTPUT_BUF */
  1834. RGB .req x7
  1835. Y .req x9
  1836. U .req x10
  1837. V .req x11
  1838. N .req w12
/* Load constants into v0.8h and v1.8h */
  1840. .if \fast_ld3 == 1
  1841. adr x13, Ljsimd_\colorid\()_ycc_neon_consts
  1842. .else
  1843. adr x13, Ljsimd_\colorid\()_ycc_neon_slowld3_consts
  1844. .endif
  1845. ld1 {v0.8h, v1.8h}, [x13]
  1846. ldr OUTPUT_BUF0, [OUTPUT_BUF]
  1847. ldr OUTPUT_BUF1, [OUTPUT_BUF, #8]
  1848. ldr OUTPUT_BUF2, [OUTPUT_BUF, #16]
  1849. .unreq OUTPUT_BUF
  1850. /* Save NEON registers */
  1851. sub sp, sp, #64
  1852. mov x9, sp
  1853. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x9], 32
  1854. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x9], 32
  1855. /* Outer loop over scanlines */
  1856. cmp NUM_ROWS, #1
  1857. b.lt 9f
  1858. 0:
  1859. ldr Y, [OUTPUT_BUF0, OUTPUT_ROW, uxtw #3]
  1860. ldr U, [OUTPUT_BUF1, OUTPUT_ROW, uxtw #3]
  1861. mov N, OUTPUT_WIDTH
  1862. ldr V, [OUTPUT_BUF2, OUTPUT_ROW, uxtw #3]
  1863. add OUTPUT_ROW, OUTPUT_ROW, #1
  1864. ldr RGB, [INPUT_BUF], #8
  1865. /* Inner loop over pixels */
  1866. subs N, N, #8
  1867. b.lt 3f
  1868. do_load \bpp, 8, \fast_ld3
  1869. do_rgb_to_yuv_stage1
  1870. subs N, N, #8
  1871. b.lt 2f
  1872. 1:
  1873. do_rgb_to_yuv_stage2_store_load_stage1 \fast_ld3
  1874. subs N, N, #8
  1875. b.ge 1b
  1876. 2:
  1877. do_rgb_to_yuv_stage2
  1878. do_store 8
  1879. tst N, #7
  1880. b.eq 8f
  1881. 3:
  1882. tbz N, #2, 3f
  1883. do_load \bpp, 4, \fast_ld3
  1884. 3:
  1885. tbz N, #1, 4f
  1886. do_load \bpp, 2, \fast_ld3
  1887. 4:
  1888. tbz N, #0, 5f
  1889. do_load \bpp, 1, \fast_ld3
  1890. 5:
  1891. do_rgb_to_yuv
  1892. tbz N, #2, 6f
  1893. do_store 4
  1894. 6:
  1895. tbz N, #1, 7f
  1896. do_store 2
  1897. 7:
  1898. tbz N, #0, 8f
  1899. do_store 1
  1900. 8:
  1901. subs NUM_ROWS, NUM_ROWS, #1
  1902. b.gt 0b
  1903. 9:
  1904. /* Restore all registers and return */
  1905. ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
  1906. ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32
  1907. br x30
  1908. .unreq OUTPUT_WIDTH
  1909. .unreq OUTPUT_ROW
  1910. .unreq INPUT_BUF
  1911. .unreq NUM_ROWS
  1912. .unreq OUTPUT_BUF0
  1913. .unreq OUTPUT_BUF1
  1914. .unreq OUTPUT_BUF2
  1915. .unreq RGB
  1916. .unreq Y
  1917. .unreq U
  1918. .unreq V
  1919. .unreq N
  1920. .purgem do_rgb_to_yuv
  1921. .purgem do_rgb_to_yuv_stage1
  1922. .purgem do_rgb_to_yuv_stage2
  1923. .purgem do_rgb_to_yuv_stage2_store_load_stage1
  1924. .endm
  1925. /*--------------------------------- id ----- bpp R G B Fast LD3 */
  1926. generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 1
  1927. generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 1
  1928. generate_jsimd_rgb_ycc_convert_neon extrgbx, 32, 0, 1, 2, 1
  1929. generate_jsimd_rgb_ycc_convert_neon extbgrx, 32, 2, 1, 0, 1
  1930. generate_jsimd_rgb_ycc_convert_neon extxbgr, 32, 3, 2, 1, 1
  1931. generate_jsimd_rgb_ycc_convert_neon extxrgb, 32, 1, 2, 3, 1
  1932. generate_jsimd_rgb_ycc_convert_neon extrgb, 24, 0, 1, 2, 0
  1933. generate_jsimd_rgb_ycc_convert_neon extbgr, 24, 2, 1, 0, 0
  1934. .purgem do_load
  1935. .purgem do_store
  1936. /*****************************************************************************/
  1937. /*
  1938. * Load data into workspace, applying unsigned->signed conversion
  1939. *
  1940. * TODO: can be combined with 'jsimd_fdct_ifast_neon' to get
  1941. * rid of VST1.16 instructions
  1942. */
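/* Per element, the usubl instructions below compute the usual level shift
 * (a sketch, assuming the standard CENTERJSAMPLE = 128 convention):
 *
 *   workspace[8 * row + i] = (int16_t)sample_data[row][start_col + i] - 128;
 */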
  1943. asm_function jsimd_convsamp_neon
  1944. SAMPLE_DATA .req x0
  1945. START_COL .req x1
  1946. WORKSPACE .req x2
  1947. TMP1 .req x9
  1948. TMP2 .req x10
  1949. TMP3 .req x11
  1950. TMP4 .req x12
  1951. TMP5 .req x13
  1952. TMP6 .req x14
  1953. TMP7 .req x15
  1954. TMP8 .req x4
  1955. TMPDUP .req w3
  1956. /* START_COL is a JDIMENSION (unsigned int) argument, so the ABI doesn't
  1957. guarantee that the upper (unused) 32 bits of x1 are valid. This
  1958. instruction ensures that those bits are set to zero. */
  1959. uxtw x1, w1
  1960. mov TMPDUP, #128
  1961. ldp TMP1, TMP2, [SAMPLE_DATA], 16
  1962. ldp TMP3, TMP4, [SAMPLE_DATA], 16
  1963. dup v0.8b, TMPDUP
  1964. add TMP1, TMP1, START_COL
  1965. add TMP2, TMP2, START_COL
  1966. ldp TMP5, TMP6, [SAMPLE_DATA], 16
  1967. add TMP3, TMP3, START_COL
  1968. add TMP4, TMP4, START_COL
  1969. ldp TMP7, TMP8, [SAMPLE_DATA], 16
  1970. add TMP5, TMP5, START_COL
  1971. add TMP6, TMP6, START_COL
  1972. ld1 {v16.8b}, [TMP1]
  1973. add TMP7, TMP7, START_COL
  1974. add TMP8, TMP8, START_COL
  1975. ld1 {v17.8b}, [TMP2]
  1976. usubl v16.8h, v16.8b, v0.8b
  1977. ld1 {v18.8b}, [TMP3]
  1978. usubl v17.8h, v17.8b, v0.8b
  1979. ld1 {v19.8b}, [TMP4]
  1980. usubl v18.8h, v18.8b, v0.8b
  1981. ld1 {v20.8b}, [TMP5]
  1982. usubl v19.8h, v19.8b, v0.8b
  1983. ld1 {v21.8b}, [TMP6]
  1984. st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [WORKSPACE], 64
  1985. usubl v20.8h, v20.8b, v0.8b
  1986. ld1 {v22.8b}, [TMP7]
  1987. usubl v21.8h, v21.8b, v0.8b
  1988. ld1 {v23.8b}, [TMP8]
  1989. usubl v22.8h, v22.8b, v0.8b
  1990. usubl v23.8h, v23.8b, v0.8b
  1991. st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [WORKSPACE], 64
  1992. br x30
  1993. .unreq SAMPLE_DATA
  1994. .unreq START_COL
  1995. .unreq WORKSPACE
  1996. .unreq TMP1
  1997. .unreq TMP2
  1998. .unreq TMP3
  1999. .unreq TMP4
  2000. .unreq TMP5
  2001. .unreq TMP6
  2002. .unreq TMP7
  2003. .unreq TMP8
  2004. .unreq TMPDUP
  2005. /*****************************************************************************/
/*
 * jsimd_fdct_islow_neon
 *
 * This function contains a slow-but-accurate integer implementation of the
 * forward DCT (Discrete Cosine Transform). The following code is based
 * directly on IJG's original jfdctint.c; see jfdctint.c for more details.
 *
 * TODO: can be combined with 'jsimd_convsamp_neon' to get
 * rid of a bunch of VLD1.16 instructions
 */
  2017. #define CONST_BITS 13
  2018. #define PASS1_BITS 2
  2019. #define DESCALE_P1 (CONST_BITS-PASS1_BITS)
  2020. #define DESCALE_P2 (CONST_BITS+PASS1_BITS)
  2021. #define F_0_298 2446 /* FIX(0.298631336) */
  2022. #define F_0_390 3196 /* FIX(0.390180644) */
  2023. #define F_0_541 4433 /* FIX(0.541196100) */
  2024. #define F_0_765 6270 /* FIX(0.765366865) */
  2025. #define F_0_899 7373 /* FIX(0.899976223) */
  2026. #define F_1_175 9633 /* FIX(1.175875602) */
  2027. #define F_1_501 12299 /* FIX(1.501321110) */
  2028. #define F_1_847 15137 /* FIX(1.847759065) */
  2029. #define F_1_961 16069 /* FIX(1.961570560) */
  2030. #define F_2_053 16819 /* FIX(2.053119869) */
  2031. #define F_2_562 20995 /* FIX(2.562915447) */
  2032. #define F_3_072 25172 /* FIX(3.072711026) */
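/* These constants follow the usual jfdctint.c fixed-point conventions
 * (a hedged note, assuming the standard libjpeg macros):
 *
 *   #define FIX(x)         ((int32_t)((x) * (1 << CONST_BITS) + 0.5))
 *   #define DESCALE(x, n)  (((x) + ((int32_t)1 << ((n) - 1))) >> (n))
 *
 * e.g. FIX(0.541196100) == 4433 == F_0_541; DESCALE(x, n) is what the
 * rshrn/rshrn2 #DESCALE_P1 and #DESCALE_P2 instructions below compute per lane.
 */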
  2033. .balign 16
  2034. Ljsimd_fdct_islow_neon_consts:
  2035. .short F_0_298
  2036. .short -F_0_390
  2037. .short F_0_541
  2038. .short F_0_765
  2039. .short - F_0_899
  2040. .short F_1_175
  2041. .short F_1_501
  2042. .short - F_1_847
  2043. .short - F_1_961
  2044. .short F_2_053
  2045. .short - F_2_562
  2046. .short F_3_072
  2047. .short 0 /* padding */
  2048. .short 0
  2049. .short 0
  2050. .short 0
  2051. #undef F_0_298
  2052. #undef F_0_390
  2053. #undef F_0_541
  2054. #undef F_0_765
  2055. #undef F_0_899
  2056. #undef F_1_175
  2057. #undef F_1_501
  2058. #undef F_1_847
  2059. #undef F_1_961
  2060. #undef F_2_053
  2061. #undef F_2_562
  2062. #undef F_3_072
  2063. #define XFIX_P_0_298 v0.h[0]
  2064. #define XFIX_N_0_390 v0.h[1]
  2065. #define XFIX_P_0_541 v0.h[2]
  2066. #define XFIX_P_0_765 v0.h[3]
  2067. #define XFIX_N_0_899 v0.h[4]
  2068. #define XFIX_P_1_175 v0.h[5]
  2069. #define XFIX_P_1_501 v0.h[6]
  2070. #define XFIX_N_1_847 v0.h[7]
  2071. #define XFIX_N_1_961 v1.h[0]
  2072. #define XFIX_P_2_053 v1.h[1]
  2073. #define XFIX_N_2_562 v1.h[2]
  2074. #define XFIX_P_3_072 v1.h[3]
  2075. asm_function jsimd_fdct_islow_neon
  2076. DATA .req x0
  2077. TMP .req x9
  2078. /* Load constants */
  2079. adr TMP, Ljsimd_fdct_islow_neon_consts
  2080. ld1 {v0.8h, v1.8h}, [TMP]
  2081. /* Save NEON registers */
  2082. sub sp, sp, #64
  2083. mov x10, sp
  2084. st1 {v8.8b, v9.8b, v10.8b, v11.8b}, [x10], 32
  2085. st1 {v12.8b, v13.8b, v14.8b, v15.8b}, [x10], 32
  2086. /* Load all DATA into NEON registers with the following allocation:
  2087. * 0 1 2 3 | 4 5 6 7
  2088. * ---------+--------
  2089. * 0 | d16 | d17 | v16.8h
  2090. * 1 | d18 | d19 | v17.8h
  2091. * 2 | d20 | d21 | v18.8h
  2092. * 3 | d22 | d23 | v19.8h
  2093. * 4 | d24 | d25 | v20.8h
  2094. * 5 | d26 | d27 | v21.8h
  2095. * 6 | d28 | d29 | v22.8h
  2096. * 7 | d30 | d31 | v23.8h
  2097. */
  2098. ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
  2099. ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
  2100. sub DATA, DATA, #64
  2101. /* Transpose */
  2102. transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
  2103. /* 1-D FDCT */
  2104. add v24.8h, v16.8h, v23.8h /* tmp0 = dataptr[0] + dataptr[7]; */
  2105. sub v31.8h, v16.8h, v23.8h /* tmp7 = dataptr[0] - dataptr[7]; */
  2106. add v25.8h, v17.8h, v22.8h /* tmp1 = dataptr[1] + dataptr[6]; */
  2107. sub v30.8h, v17.8h, v22.8h /* tmp6 = dataptr[1] - dataptr[6]; */
  2108. add v26.8h, v18.8h, v21.8h /* tmp2 = dataptr[2] + dataptr[5]; */
  2109. sub v29.8h, v18.8h, v21.8h /* tmp5 = dataptr[2] - dataptr[5]; */
  2110. add v27.8h, v19.8h, v20.8h /* tmp3 = dataptr[3] + dataptr[4]; */
  2111. sub v28.8h, v19.8h, v20.8h /* tmp4 = dataptr[3] - dataptr[4]; */
  2112. /* even part */
  2113. add v8.8h, v24.8h, v27.8h /* tmp10 = tmp0 + tmp3; */
  2114. sub v9.8h, v24.8h, v27.8h /* tmp13 = tmp0 - tmp3; */
  2115. add v10.8h, v25.8h, v26.8h /* tmp11 = tmp1 + tmp2; */
  2116. sub v11.8h, v25.8h, v26.8h /* tmp12 = tmp1 - tmp2; */
  2117. add v16.8h, v8.8h, v10.8h /* tmp10 + tmp11 */
  2118. sub v20.8h, v8.8h, v10.8h /* tmp10 - tmp11 */
  2119. add v18.8h, v11.8h, v9.8h /* tmp12 + tmp13 */
  2120. shl v16.8h, v16.8h, #PASS1_BITS /* dataptr[0] = (DCTELEM) LEFT_SHIFT(tmp10 + tmp11, PASS1_BITS); */
  2121. shl v20.8h, v20.8h, #PASS1_BITS /* dataptr[4] = (DCTELEM) LEFT_SHIFT(tmp10 - tmp11, PASS1_BITS); */
  2122. smull2 v24.4s, v18.8h, XFIX_P_0_541 /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  2123. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  2124. mov v22.16b, v18.16b
  2125. mov v25.16b, v24.16b
  2126. smlal v18.4s, v9.4h, XFIX_P_0_765 /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  2127. smlal2 v24.4s, v9.8h, XFIX_P_0_765 /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  2128. smlal v22.4s, v11.4h, XFIX_N_1_847 /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  2129. smlal2 v25.4s, v11.8h, XFIX_N_1_847 /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  2130. rshrn v18.4h, v18.4s, #DESCALE_P1
  2131. rshrn v22.4h, v22.4s, #DESCALE_P1
  2132. rshrn2 v18.8h, v24.4s, #DESCALE_P1 /* dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS-PASS1_BITS); */
  2133. rshrn2 v22.8h, v25.4s, #DESCALE_P1 /* dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS-PASS1_BITS); */
  2134. /* Odd part */
  2135. add v8.8h, v28.8h, v31.8h /* z1 = tmp4 + tmp7; */
  2136. add v9.8h, v29.8h, v30.8h /* z2 = tmp5 + tmp6; */
  2137. add v10.8h, v28.8h, v30.8h /* z3 = tmp4 + tmp6; */
  2138. add v11.8h, v29.8h, v31.8h /* z4 = tmp5 + tmp7; */
  2139. smull v4.4s, v10.4h, XFIX_P_1_175 /* z5 lo = z3 lo * XFIX_P_1_175 */
  2140. smull2 v5.4s, v10.8h, XFIX_P_1_175
  2141. smlal v4.4s, v11.4h, XFIX_P_1_175 /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
  2142. smlal2 v5.4s, v11.8h, XFIX_P_1_175
  2143. smull2 v24.4s, v28.8h, XFIX_P_0_298
  2144. smull2 v25.4s, v29.8h, XFIX_P_2_053
  2145. smull2 v26.4s, v30.8h, XFIX_P_3_072
  2146. smull2 v27.4s, v31.8h, XFIX_P_1_501
  2147. smull v28.4s, v28.4h, XFIX_P_0_298 /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
  2148. smull v29.4s, v29.4h, XFIX_P_2_053 /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
  2149. smull v30.4s, v30.4h, XFIX_P_3_072 /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
  2150. smull v31.4s, v31.4h, XFIX_P_1_501 /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
  2151. smull2 v12.4s, v8.8h, XFIX_N_0_899
  2152. smull2 v13.4s, v9.8h, XFIX_N_2_562
  2153. smull2 v14.4s, v10.8h, XFIX_N_1_961
  2154. smull2 v15.4s, v11.8h, XFIX_N_0_390
  2155. smull v8.4s, v8.4h, XFIX_N_0_899 /* z1 = MULTIPLY(z1, - FIX_0_899976223); */
  2156. smull v9.4s, v9.4h, XFIX_N_2_562 /* z2 = MULTIPLY(z2, - FIX_2_562915447); */
  2157. smull v10.4s, v10.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560); */
  2158. smull v11.4s, v11.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644); */
  2159. add v10.4s, v10.4s, v4.4s /* z3 += z5 */
  2160. add v14.4s, v14.4s, v5.4s
  2161. add v11.4s, v11.4s, v4.4s /* z4 += z5 */
  2162. add v15.4s, v15.4s, v5.4s
  2163. add v28.4s, v28.4s, v8.4s /* tmp4 += z1 */
  2164. add v24.4s, v24.4s, v12.4s
  2165. add v29.4s, v29.4s, v9.4s /* tmp5 += z2 */
  2166. add v25.4s, v25.4s, v13.4s
  2167. add v30.4s, v30.4s, v10.4s /* tmp6 += z3 */
  2168. add v26.4s, v26.4s, v14.4s
  2169. add v31.4s, v31.4s, v11.4s /* tmp7 += z4 */
  2170. add v27.4s, v27.4s, v15.4s
  2171. add v28.4s, v28.4s, v10.4s /* tmp4 += z3 */
  2172. add v24.4s, v24.4s, v14.4s
  2173. add v29.4s, v29.4s, v11.4s /* tmp5 += z4 */
  2174. add v25.4s, v25.4s, v15.4s
  2175. add v30.4s, v30.4s, v9.4s /* tmp6 += z2 */
  2176. add v26.4s, v26.4s, v13.4s
  2177. add v31.4s, v31.4s, v8.4s /* tmp7 += z1 */
  2178. add v27.4s, v27.4s, v12.4s
  2179. rshrn v23.4h, v28.4s, #DESCALE_P1
  2180. rshrn v21.4h, v29.4s, #DESCALE_P1
  2181. rshrn v19.4h, v30.4s, #DESCALE_P1
  2182. rshrn v17.4h, v31.4s, #DESCALE_P1
  2183. rshrn2 v23.8h, v24.4s, #DESCALE_P1 /* dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); */
  2184. rshrn2 v21.8h, v25.4s, #DESCALE_P1 /* dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); */
  2185. rshrn2 v19.8h, v26.4s, #DESCALE_P1 /* dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); */
  2186. rshrn2 v17.8h, v27.4s, #DESCALE_P1 /* dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); */
  2187. /* Transpose */
  2188. transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v31, v2, v3, v4
  2189. /* 1-D FDCT */
  2190. add v24.8h, v16.8h, v23.8h /* tmp0 = dataptr[0] + dataptr[7]; */
  2191. sub v31.8h, v16.8h, v23.8h /* tmp7 = dataptr[0] - dataptr[7]; */
  2192. add v25.8h, v17.8h, v22.8h /* tmp1 = dataptr[1] + dataptr[6]; */
  2193. sub v30.8h, v17.8h, v22.8h /* tmp6 = dataptr[1] - dataptr[6]; */
  2194. add v26.8h, v18.8h, v21.8h /* tmp2 = dataptr[2] + dataptr[5]; */
  2195. sub v29.8h, v18.8h, v21.8h /* tmp5 = dataptr[2] - dataptr[5]; */
  2196. add v27.8h, v19.8h, v20.8h /* tmp3 = dataptr[3] + dataptr[4]; */
  2197. sub v28.8h, v19.8h, v20.8h /* tmp4 = dataptr[3] - dataptr[4]; */
  2198. /* even part */
  2199. add v8.8h, v24.8h, v27.8h /* tmp10 = tmp0 + tmp3; */
  2200. sub v9.8h, v24.8h, v27.8h /* tmp13 = tmp0 - tmp3; */
  2201. add v10.8h, v25.8h, v26.8h /* tmp11 = tmp1 + tmp2; */
  2202. sub v11.8h, v25.8h, v26.8h /* tmp12 = tmp1 - tmp2; */
  2203. add v16.8h, v8.8h, v10.8h /* tmp10 + tmp11 */
  2204. sub v20.8h, v8.8h, v10.8h /* tmp10 - tmp11 */
  2205. add v18.8h, v11.8h, v9.8h /* tmp12 + tmp13 */
  2206. srshr v16.8h, v16.8h, #PASS1_BITS /* dataptr[0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); */
  2207. srshr v20.8h, v20.8h, #PASS1_BITS /* dataptr[4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); */
  2208. smull2 v24.4s, v18.8h, XFIX_P_0_541 /* z1 hi = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  2209. smull v18.4s, v18.4h, XFIX_P_0_541 /* z1 lo = MULTIPLY(tmp12 + tmp13, XFIX_P_0_541); */
  2210. mov v22.16b, v18.16b
  2211. mov v25.16b, v24.16b
  2212. smlal v18.4s, v9.4h, XFIX_P_0_765 /* lo z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  2213. smlal2 v24.4s, v9.8h, XFIX_P_0_765 /* hi z1 + MULTIPLY(tmp13, XFIX_P_0_765) */
  2214. smlal v22.4s, v11.4h, XFIX_N_1_847 /* lo z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  2215. smlal2 v25.4s, v11.8h, XFIX_N_1_847 /* hi z1 + MULTIPLY(tmp12, XFIX_N_1_847) */
  2216. rshrn v18.4h, v18.4s, #DESCALE_P2
  2217. rshrn v22.4h, v22.4s, #DESCALE_P2
    rshrn2 v18.8h, v24.4s, #DESCALE_P2 /* dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, XFIX_P_0_765), CONST_BITS-PASS1_BITS); */
    rshrn2 v22.8h, v25.4s, #DESCALE_P2 /* dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, XFIX_N_1_847), CONST_BITS-PASS1_BITS); */

    /* Odd part */
    add v8.8h, v28.8h, v31.8h  /* z1 = tmp4 + tmp7; */
    add v9.8h, v29.8h, v30.8h  /* z2 = tmp5 + tmp6; */
    add v10.8h, v28.8h, v30.8h /* z3 = tmp4 + tmp6; */
    add v11.8h, v29.8h, v31.8h /* z4 = tmp5 + tmp7; */
    smull v4.4s, v10.4h, XFIX_P_1_175  /* z5 lo = z3 lo * XFIX_P_1_175 */
    smull2 v5.4s, v10.8h, XFIX_P_1_175
    smlal v4.4s, v11.4h, XFIX_P_1_175  /* z5 = MULTIPLY(z3 + z4, FIX_1_175875602); */
    smlal2 v5.4s, v11.8h, XFIX_P_1_175
    smull2 v24.4s, v28.8h, XFIX_P_0_298
    smull2 v25.4s, v29.8h, XFIX_P_2_053
    smull2 v26.4s, v30.8h, XFIX_P_3_072
    smull2 v27.4s, v31.8h, XFIX_P_1_501
    smull v28.4s, v28.4h, XFIX_P_0_298 /* tmp4 = MULTIPLY(tmp4, FIX_0_298631336); */
    smull v29.4s, v29.4h, XFIX_P_2_053 /* tmp5 = MULTIPLY(tmp5, FIX_2_053119869); */
    smull v30.4s, v30.4h, XFIX_P_3_072 /* tmp6 = MULTIPLY(tmp6, FIX_3_072711026); */
    smull v31.4s, v31.4h, XFIX_P_1_501 /* tmp7 = MULTIPLY(tmp7, FIX_1_501321110); */
    smull2 v12.4s, v8.8h, XFIX_N_0_899
    smull2 v13.4s, v9.8h, XFIX_N_2_562
    smull2 v14.4s, v10.8h, XFIX_N_1_961
    smull2 v15.4s, v11.8h, XFIX_N_0_390
    smull v8.4s, v8.4h, XFIX_N_0_899   /* z1 = MULTIPLY(z1, - FIX_0_899976223); */
    smull v9.4s, v9.4h, XFIX_N_2_562   /* z2 = MULTIPLY(z2, - FIX_2_562915447); */
    smull v10.4s, v10.4h, XFIX_N_1_961 /* z3 = MULTIPLY(z3, - FIX_1_961570560); */
    smull v11.4s, v11.4h, XFIX_N_0_390 /* z4 = MULTIPLY(z4, - FIX_0_390180644); */
    add v10.4s, v10.4s, v4.4s
    add v14.4s, v14.4s, v5.4s
    add v11.4s, v11.4s, v4.4s
    add v15.4s, v15.4s, v5.4s
    add v28.4s, v28.4s, v8.4s  /* tmp4 += z1 */
    add v24.4s, v24.4s, v12.4s
    add v29.4s, v29.4s, v9.4s  /* tmp5 += z2 */
    add v25.4s, v25.4s, v13.4s
    add v30.4s, v30.4s, v10.4s /* tmp6 += z3 */
    add v26.4s, v26.4s, v14.4s
    add v31.4s, v31.4s, v11.4s /* tmp7 += z4 */
    add v27.4s, v27.4s, v15.4s
    add v28.4s, v28.4s, v10.4s /* tmp4 += z3 */
    add v24.4s, v24.4s, v14.4s
    add v29.4s, v29.4s, v11.4s /* tmp5 += z4 */
    add v25.4s, v25.4s, v15.4s
    add v30.4s, v30.4s, v9.4s  /* tmp6 += z2 */
    add v26.4s, v26.4s, v13.4s
    add v31.4s, v31.4s, v8.4s  /* tmp7 += z1 */
    add v27.4s, v27.4s, v12.4s
    rshrn v23.4h, v28.4s, #DESCALE_P2
    rshrn v21.4h, v29.4s, #DESCALE_P2
    rshrn v19.4h, v30.4s, #DESCALE_P2
    rshrn v17.4h, v31.4s, #DESCALE_P2
    rshrn2 v23.8h, v24.4s, #DESCALE_P2 /* dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); */
    rshrn2 v21.8h, v25.4s, #DESCALE_P2 /* dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); */
    rshrn2 v19.8h, v26.4s, #DESCALE_P2 /* dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); */
    rshrn2 v17.8h, v27.4s, #DESCALE_P2 /* dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); */

    /* store results */
    st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
    st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
    /* Restore NEON registers */
    ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [sp], 32
    ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [sp], 32

    br x30

    .unreq DATA
    .unreq TMP

#undef XFIX_P_0_298
#undef XFIX_N_0_390
#undef XFIX_P_0_541
#undef XFIX_P_0_765
#undef XFIX_N_0_899
#undef XFIX_P_1_175
#undef XFIX_P_1_501
#undef XFIX_N_1_847
#undef XFIX_N_1_961
#undef XFIX_P_2_053
#undef XFIX_N_2_562
#undef XFIX_P_3_072

/*****************************************************************************/
/*
 * jsimd_fdct_ifast_neon
 *
 * This function contains a fast, not so accurate integer implementation of
 * the forward DCT (Discrete Cosine Transform). It uses the same calculations
 * and produces exactly the same output as IJG's original 'jpeg_fdct_ifast'
 * function from jfdctfst.c
 *
 * TODO: can be combined with 'jsimd_convsamp_neon' to get
 * rid of a bunch of VLD1.16 instructions
 */
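/*
 * For reference, one 1-D pass of the scalar algorithm being vectorized here
 * (a sketch following IJG's jfdctfst.c; MULTIPLY() stands for the 16-bit
 * fixed-point multiply that the NEON code performs with sqdmulh):
 *
 *   tmp0 = d[0] + d[7];  tmp7 = d[0] - d[7];
 *   tmp1 = d[1] + d[6];  tmp6 = d[1] - d[6];
 *   tmp2 = d[2] + d[5];  tmp5 = d[2] - d[5];
 *   tmp3 = d[3] + d[4];  tmp4 = d[3] - d[4];
 *   // Even part
 *   tmp10 = tmp0 + tmp3;  tmp13 = tmp0 - tmp3;
 *   tmp11 = tmp1 + tmp2;  tmp12 = tmp1 - tmp2;
 *   d[0] = tmp10 + tmp11;  d[4] = tmp10 - tmp11;
 *   z1 = MULTIPLY(tmp12 + tmp13, FIX_0_707106781);
 *   d[2] = tmp13 + z1;  d[6] = tmp13 - z1;
 *   // Odd part
 *   tmp10 = tmp4 + tmp5;  tmp11 = tmp5 + tmp6;  tmp12 = tmp6 + tmp7;
 *   z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433);
 *   z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5;
 *   z4 = MULTIPLY(tmp12, FIX_1_306562965) + z5;
 *   z3 = MULTIPLY(tmp11, FIX_0_707106781);
 *   z11 = tmp7 + z3;  z13 = tmp7 - z3;
 *   d[5] = z13 + z2;  d[3] = z13 - z2;
 *   d[1] = z11 + z4;  d[7] = z11 - z4;
 *
 * The NEON code runs this pass twice (rows, then columns) with a transpose
 * in between; the register names differ but the data flow is the same.
 */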
#undef XFIX_0_541196100
#define XFIX_0_382683433 v0.h[0]
#define XFIX_0_541196100 v0.h[1]
#define XFIX_0_707106781 v0.h[2]
#define XFIX_1_306562965 v0.h[3]

.balign 16
Ljsimd_fdct_ifast_neon_consts:
    .short (98 * 128)               /* XFIX_0_382683433 */
    .short (139 * 128)              /* XFIX_0_541196100 */
    .short (181 * 128)              /* XFIX_0_707106781 */
    .short (334 * 128 - 256 * 128)  /* XFIX_1_306562965 */

asm_function jsimd_fdct_ifast_neon

    DATA .req x0
    TMP  .req x9

    /* Load constants */
    adr TMP, Ljsimd_fdct_ifast_neon_consts
    ld1 {v0.4h}, [TMP]
    /* Load all DATA into NEON registers with the following allocation:
     *       0 1 2 3 | 4 5 6 7
     *      ---------+--------
     *   0 | d16     | d17    | v16.8h
     *   1 | d18     | d19    | v17.8h
     *   2 | d20     | d21    | v18.8h
     *   3 | d22     | d23    | v19.8h
     *   4 | d24     | d25    | v20.8h
     *   5 | d26     | d27    | v21.8h
     *   6 | d28     | d29    | v22.8h
     *   7 | d30     | d31    | v23.8h
     */
    ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
    ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]
    mov TMP, #2
    sub DATA, DATA, #64
1:
    /* Transpose */
    transpose_8x8 v16, v17, v18, v19, v20, v21, v22, v23, v1, v2, v3, v4
    subs TMP, TMP, #1
    /* 1-D FDCT */
    add v4.8h, v19.8h, v20.8h
    sub v20.8h, v19.8h, v20.8h
    sub v28.8h, v18.8h, v21.8h
    add v18.8h, v18.8h, v21.8h
    sub v29.8h, v17.8h, v22.8h
    add v17.8h, v17.8h, v22.8h
    sub v21.8h, v16.8h, v23.8h
    add v16.8h, v16.8h, v23.8h
    sub v6.8h, v17.8h, v18.8h
    sub v7.8h, v16.8h, v4.8h
    add v5.8h, v17.8h, v18.8h
    add v6.8h, v6.8h, v7.8h
    add v4.8h, v16.8h, v4.8h
    sqdmulh v6.8h, v6.8h, XFIX_0_707106781
    add v19.8h, v20.8h, v28.8h
    add v16.8h, v4.8h, v5.8h
    sub v20.8h, v4.8h, v5.8h
    add v5.8h, v28.8h, v29.8h
    add v29.8h, v29.8h, v21.8h
    sqdmulh v5.8h, v5.8h, XFIX_0_707106781
    sub v28.8h, v19.8h, v29.8h
    add v18.8h, v7.8h, v6.8h
    sqdmulh v28.8h, v28.8h, XFIX_0_382683433
    sub v22.8h, v7.8h, v6.8h
    sqdmulh v19.8h, v19.8h, XFIX_0_541196100
    sqdmulh v7.8h, v29.8h, XFIX_1_306562965
    add v6.8h, v21.8h, v5.8h
    sub v5.8h, v21.8h, v5.8h
    add v29.8h, v29.8h, v28.8h
    add v19.8h, v19.8h, v28.8h
    add v29.8h, v29.8h, v7.8h
    add v21.8h, v5.8h, v19.8h
    sub v19.8h, v5.8h, v19.8h
    add v17.8h, v6.8h, v29.8h
    sub v23.8h, v6.8h, v29.8h
    b.ne 1b

    /* store results */
    st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [DATA], 64
    st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [DATA]

    br x30

    .unreq DATA
    .unreq TMP
#undef XFIX_0_382683433
#undef XFIX_0_541196100
#undef XFIX_0_707106781
#undef XFIX_1_306562965
/*****************************************************************************/

/*
 * GLOBAL(void)
 * jsimd_quantize_neon (JCOEFPTR coef_block, DCTELEM *divisors,
 *                      DCTELEM *workspace);
 *
 */
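/*
 * Per coefficient, each lane below computes roughly the following (a sketch
 * of the scalar equivalent; variable names are illustrative, not taken from
 * libjpeg-turbo):
 *
 *   t = workspace[i];
 *   sign = t >> 15;                    // all-ones if negative, else zero
 *   t = abs(t) + correction[i];        // pre-add rounding correction
 *   t = (t * reciprocal[i]) >> 16;     // divide via reciprocal multiply...
 *   t = t >> shift[i];                 // ...plus a per-coefficient shift
 *   coef_block[i] = (t ^ sign) - sign; // restore the original sign
 *
 * The divisors table holds reciprocals at offset 0, corrections at +64*2
 * bytes, and shift counts at +64*6 bytes, which is where the CORRECTION and
 * SHIFT pointers below are set.
 */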
asm_function jsimd_quantize_neon

    COEF_BLOCK .req x0
    DIVISORS   .req x1
    WORKSPACE  .req x2

    RECIPROCAL .req DIVISORS
    CORRECTION .req x9
    SHIFT      .req x10
    LOOP_COUNT .req x11

    mov LOOP_COUNT, #2
    add CORRECTION, DIVISORS, #(64 * 2)
    add SHIFT, DIVISORS, #(64 * 6)
1:
    subs LOOP_COUNT, LOOP_COUNT, #1
    ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [WORKSPACE], 64
    ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [CORRECTION], 64
    abs v20.8h, v0.8h
    abs v21.8h, v1.8h
    abs v22.8h, v2.8h
    abs v23.8h, v3.8h
    ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [RECIPROCAL], 64
    add v20.8h, v20.8h, v4.8h  /* add correction */
    add v21.8h, v21.8h, v5.8h
    add v22.8h, v22.8h, v6.8h
    add v23.8h, v23.8h, v7.8h
    umull v4.4s, v20.4h, v28.4h  /* multiply by reciprocal */
    umull2 v16.4s, v20.8h, v28.8h
    umull v5.4s, v21.4h, v29.4h
    umull2 v17.4s, v21.8h, v29.8h
    umull v6.4s, v22.4h, v30.4h  /* multiply by reciprocal */
    umull2 v18.4s, v22.8h, v30.8h
    umull v7.4s, v23.4h, v31.4h
    umull2 v19.4s, v23.8h, v31.8h
    ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [SHIFT], 64
    shrn v4.4h, v4.4s, #16
    shrn v5.4h, v5.4s, #16
    shrn v6.4h, v6.4s, #16
    shrn v7.4h, v7.4s, #16
    shrn2 v4.8h, v16.4s, #16
    shrn2 v5.8h, v17.4s, #16
    shrn2 v6.8h, v18.4s, #16
    shrn2 v7.8h, v19.4s, #16
    neg v24.8h, v24.8h
    neg v25.8h, v25.8h
    neg v26.8h, v26.8h
    neg v27.8h, v27.8h
    sshr v0.8h, v0.8h, #15  /* extract sign */
    sshr v1.8h, v1.8h, #15
    sshr v2.8h, v2.8h, #15
    sshr v3.8h, v3.8h, #15
    ushl v4.8h, v4.8h, v24.8h  /* shift */
    ushl v5.8h, v5.8h, v25.8h
    ushl v6.8h, v6.8h, v26.8h
    ushl v7.8h, v7.8h, v27.8h
    eor v4.16b, v4.16b, v0.16b  /* restore sign */
    eor v5.16b, v5.16b, v1.16b
    eor v6.16b, v6.16b, v2.16b
    eor v7.16b, v7.16b, v3.16b
    sub v4.8h, v4.8h, v0.8h
    sub v5.8h, v5.8h, v1.8h
    sub v6.8h, v6.8h, v2.8h
    sub v7.8h, v7.8h, v3.8h
    st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [COEF_BLOCK], 64
    b.ne 1b

    br x30  /* return */

    .unreq COEF_BLOCK
    .unreq DIVISORS
    .unreq WORKSPACE
    .unreq RECIPROCAL
    .unreq CORRECTION
    .unreq SHIFT
    .unreq LOOP_COUNT
/*****************************************************************************/

/*
 * Downsample pixel values of a single component.
 * This version handles the common case of 2:1 horizontal and 1:1 vertical,
 * without smoothing.
 *
 * GLOBAL(void)
 * jsimd_h2v1_downsample_neon (JDIMENSION image_width, int max_v_samp_factor,
 *                             JDIMENSION v_samp_factor,
 *                             JDIMENSION width_blocks, JSAMPARRAY input_data,
 *                             JSAMPARRAY output_data);
 */
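/*
 * Sketch of the scalar loop this routine vectorizes (names illustrative;
 * compare h2v1_downsample in jcsample.c):
 *
 *   bias = 0;                               // alternates 0,1,0,1,...
 *   for (col = 0; col < output_cols; col++) {
 *     out[col] = (in[2*col] + in[2*col+1] + bias) >> 1;
 *     bias ^= 1;
 *   }
 *
 * The NEON code folds the alternating bias into v16 (0x10000 per 32-bit
 * lane, i.e. halfword pairs {0, 1}), sums byte pairs with uadalp, and
 * narrows with a shift by 1.
 */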
.balign 16
Ljsimd_h2_downsample_neon_consts:
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F  /* diff 0 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0E  /* diff 1 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0D, 0x0D  /* diff 2 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0C, 0x0C, 0x0C  /* diff 3 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x09, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B  /* diff 4 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x09, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A  /* diff 5 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09  /* diff 6 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08  /* diff 7 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, \
          0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07  /* diff 8 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x06, \
          0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06  /* diff 9 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x05, 0x05, \
          0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05  /* diff 10 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x04, 0x04, 0x04, \
          0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04  /* diff 11 */
    .byte 0x00, 0x01, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, \
          0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03  /* diff 12 */
    .byte 0x00, 0x01, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, \
          0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02  /* diff 13 */
    .byte 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, \
          0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01  /* diff 14 */
    .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
          0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  /* diff 15 */
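/*
 * Each 16-byte row above is a TBL index pattern: row "diff N" keeps indices
 * 0..(15-N) and then repeats the last valid index, so that when image_width
 * falls N pixels short of a full 16-pixel block, the rightmost input pixel
 * is replicated to fill the vector ("expand right") before the final
 * averaging step of the downsample routines below.
 */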
asm_function jsimd_h2v1_downsample_neon

    IMAGE_WIDTH .req x0
    MAX_V_SAMP  .req x1
    V_SAMP      .req x2
    BLOCK_WIDTH .req x3
    INPUT_DATA  .req x4
    OUTPUT_DATA .req x5
    OUTPTR      .req x9
    INPTR       .req x10
    TMP1        .req x11
    TMP2        .req x12
    TMP3        .req x13
    TMPDUP      .req w15

    mov TMPDUP, #0x10000
    lsl TMP2, BLOCK_WIDTH, #4
    sub TMP2, TMP2, IMAGE_WIDTH
    adr TMP3, Ljsimd_h2_downsample_neon_consts
    add TMP3, TMP3, TMP2, lsl #4
    dup v16.4s, TMPDUP
    ld1 {v18.16b}, [TMP3]
1:  /* row loop */
    ldr INPTR, [INPUT_DATA], #8
    ldr OUTPTR, [OUTPUT_DATA], #8
    subs TMP1, BLOCK_WIDTH, #1
    b.eq 3f
2:  /* columns */
    ld1 {v0.16b}, [INPTR], #16
    mov v4.16b, v16.16b
    subs TMP1, TMP1, #1
    uadalp v4.8h, v0.16b
    shrn v6.8b, v4.8h, #1
    st1 {v6.8b}, [OUTPTR], #8
    b.ne 2b
3:  /* last columns */
    ld1 {v0.16b}, [INPTR]
    mov v4.16b, v16.16b
    subs V_SAMP, V_SAMP, #1
    /* expand right */
    tbl v2.16b, {v0.16b}, v18.16b
    uadalp v4.8h, v2.16b
    shrn v6.8b, v4.8h, #1
    st1 {v6.8b}, [OUTPTR], #8
    b.ne 1b

    br x30

    .unreq IMAGE_WIDTH
    .unreq MAX_V_SAMP
    .unreq V_SAMP
    .unreq BLOCK_WIDTH
    .unreq INPUT_DATA
    .unreq OUTPUT_DATA
    .unreq OUTPTR
    .unreq INPTR
    .unreq TMP1
    .unreq TMP2
    .unreq TMP3
    .unreq TMPDUP
/*****************************************************************************/

/*
 * Downsample pixel values of a single component.
 * This version handles the common case of 2:1 horizontal and 2:1 vertical,
 * without smoothing.
 *
 * GLOBAL(void)
 * jsimd_h2v2_downsample_neon (JDIMENSION image_width, int max_v_samp_factor,
 *                             JDIMENSION v_samp_factor,
 *                             JDIMENSION width_blocks, JSAMPARRAY input_data,
 *                             JSAMPARRAY output_data);
 */
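/*
 * Scalar sketch (names illustrative; compare h2v2_downsample in jcsample.c):
 *
 *   bias = 1;                               // alternates 1,2,1,2,...
 *   for (col = 0; col < output_cols; col++) {
 *     out[col] = (in0[2*col] + in0[2*col+1] +
 *                 in1[2*col] + in1[2*col+1] + bias) >> 2;
 *     bias ^= 3;
 *   }
 *
 * Here the bias pattern {1, 2} is built as 0x00020001 per 32-bit lane of
 * v16, two input rows are accumulated with uadalp, and the result is
 * narrowed with a shift by 2.
 */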
.balign 16
asm_function jsimd_h2v2_downsample_neon

    IMAGE_WIDTH .req x0
    MAX_V_SAMP  .req x1
    V_SAMP      .req x2
    BLOCK_WIDTH .req x3
    INPUT_DATA  .req x4
    OUTPUT_DATA .req x5
    OUTPTR      .req x9
    INPTR0      .req x10
    INPTR1      .req x14
    TMP1        .req x11
    TMP2        .req x12
    TMP3        .req x13
    TMPDUP      .req w15

    mov TMPDUP, #1
    lsl TMP2, BLOCK_WIDTH, #4
    lsl TMPDUP, TMPDUP, #17
    sub TMP2, TMP2, IMAGE_WIDTH
    adr TMP3, Ljsimd_h2_downsample_neon_consts
    orr TMPDUP, TMPDUP, #1
    add TMP3, TMP3, TMP2, lsl #4
    dup v16.4s, TMPDUP
    ld1 {v18.16b}, [TMP3]
1:  /* row loop */
    ldr INPTR0, [INPUT_DATA], #8
    ldr OUTPTR, [OUTPUT_DATA], #8
    ldr INPTR1, [INPUT_DATA], #8
    subs TMP1, BLOCK_WIDTH, #1
    b.eq 3f
2:  /* columns */
    ld1 {v0.16b}, [INPTR0], #16
    ld1 {v1.16b}, [INPTR1], #16
    mov v4.16b, v16.16b
    subs TMP1, TMP1, #1
    uadalp v4.8h, v0.16b
    uadalp v4.8h, v1.16b
    shrn v6.8b, v4.8h, #2
    st1 {v6.8b}, [OUTPTR], #8
    b.ne 2b
3:  /* last columns */
    ld1 {v0.16b}, [INPTR0], #16
    ld1 {v1.16b}, [INPTR1], #16
    mov v4.16b, v16.16b
    subs V_SAMP, V_SAMP, #1
    /* expand right */
    tbl v2.16b, {v0.16b}, v18.16b
    tbl v3.16b, {v1.16b}, v18.16b
    uadalp v4.8h, v2.16b
    uadalp v4.8h, v3.16b
    shrn v6.8b, v4.8h, #2
    st1 {v6.8b}, [OUTPTR], #8
    b.ne 1b

    br x30

    .unreq IMAGE_WIDTH
    .unreq MAX_V_SAMP
    .unreq V_SAMP
    .unreq BLOCK_WIDTH
    .unreq INPUT_DATA
    .unreq OUTPUT_DATA
    .unreq OUTPTR
    .unreq INPTR0
    .unreq INPTR1
    .unreq TMP1
    .unreq TMP2
    .unreq TMP3
    .unreq TMPDUP
/*****************************************************************************/

/*
 * GLOBAL(JOCTET*)
 * jsimd_huff_encode_one_block (working_state *state, JOCTET *buffer,
 *                              JCOEFPTR block, int last_dc_val,
 *                              c_derived_tbl *dctbl, c_derived_tbl *actbl)
 *
 */
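/*
 * The bit-packing macros below implement the usual JPEG entropy-coder output
 * path. In scalar form (a sketch, with illustrative names):
 *
 *   // put_bits CODE, SIZE: append SIZE bits of CODE to a 64-bit bit buffer
 *   put_buffer = (put_buffer << size) | code;
 *   put_bits  += size;
 *
 *   // emit_byte: pop the most significant complete byte and write it out,
 *   // byte-stuffing a 0x00 after any 0xFF as the JPEG format requires
 *   put_bits -= 8;
 *   c = (put_buffer >> put_bits) & 0xFF;
 *   *(++buffer) = c;
 *   if (c == 0xFF)
 *     *(++buffer) = 0;
 *
 * checkbuf31/checkbuf47 flush 4 or 6 bytes whenever the bit count reaches
 * 32 or 48, so put_buffer never overflows its 64 bits.
 */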
    BUFFER     .req x1
    PUT_BUFFER .req x6
    PUT_BITS   .req x7
    PUT_BITSw  .req w7

.macro emit_byte
    sub PUT_BITS, PUT_BITS, #0x8
    lsr x19, PUT_BUFFER, PUT_BITS
    uxtb w19, w19
    strb w19, [BUFFER, #1]!
    cmp w19, #0xff
    b.ne 14f
    strb wzr, [BUFFER, #1]!
14:
.endm

.macro put_bits CODE, SIZE
    lsl PUT_BUFFER, PUT_BUFFER, \SIZE
    add PUT_BITS, PUT_BITS, \SIZE
    orr PUT_BUFFER, PUT_BUFFER, \CODE
.endm

.macro checkbuf31
    cmp PUT_BITS, #0x20
    b.lt 31f
    emit_byte
    emit_byte
    emit_byte
    emit_byte
31:
.endm

.macro checkbuf47
    cmp PUT_BITS, #0x30
    b.lt 47f
    emit_byte
    emit_byte
    emit_byte
    emit_byte
    emit_byte
    emit_byte
47:
.endm
.macro generate_jsimd_huff_encode_one_block fast_tbl

.balign 16
.if \fast_tbl == 1
Ljsimd_huff_encode_one_block_neon_consts:
.else
Ljsimd_huff_encode_one_block_neon_slowtbl_consts:
.endif
    .byte 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, \
          0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80
.if \fast_tbl == 1
    .byte 0, 1, 2, 3, 16, 17, 32, 33, \
          18, 19, 4, 5, 6, 7, 20, 21            /* L0 => L3 : 4 lines OK */
    .byte 34, 35, 48, 49, 255, 255, 50, 51, \
          36, 37, 22, 23, 8, 9, 10, 11          /* L0 => L3 : 4 lines OK */
    .byte 8, 9, 22, 23, 36, 37, 50, 51, \
          255, 255, 255, 255, 255, 255, 52, 53  /* L1 => L4 : 4 lines OK */
    .byte 54, 55, 40, 41, 26, 27, 12, 13, \
          14, 15, 28, 29, 42, 43, 56, 57        /* L0 => L3 : 4 lines OK */
    .byte 6, 7, 20, 21, 34, 35, 48, 49, \
          50, 51, 36, 37, 22, 23, 8, 9          /* L4 => L7 : 4 lines OK */
    .byte 42, 43, 28, 29, 14, 15, 30, 31, \
          44, 45, 58, 59, 255, 255, 255, 255    /* L1 => L4 : 4 lines OK */
    .byte 255, 255, 255, 255, 56, 57, 42, 43, \
          28, 29, 14, 15, 30, 31, 44, 45        /* L3 => L6 : 4 lines OK */
    .byte 26, 27, 40, 41, 42, 43, 28, 29, \
          14, 15, 30, 31, 44, 45, 46, 47        /* L5 => L7 : 3 lines OK */
    .byte 255, 255, 255, 255, 0, 1, 255, 255, \
          255, 255, 255, 255, 255, 255, 255, 255  /* L4 : 1 line OK */
    .byte 255, 255, 255, 255, 255, 255, 255, 255, \
          0, 1, 16, 17, 2, 3, 255, 255          /* L5 => L6 : 2 lines OK */
    .byte 255, 255, 255, 255, 255, 255, 255, 255, \
          255, 255, 255, 255, 8, 9, 22, 23      /* L5 => L6 : 2 lines OK */
    .byte 4, 5, 6, 7, 255, 255, 255, 255, \
          255, 255, 255, 255, 255, 255, 255, 255  /* L7 : 1 line OK */
.endif
.if \fast_tbl == 1
asm_function jsimd_huff_encode_one_block_neon
.else
asm_function jsimd_huff_encode_one_block_neon_slowtbl
.endif
    sub sp, sp, 272
    sub BUFFER, BUFFER, #0x1  /* BUFFER=buffer-- */
    /* Save ARM registers */
    stp x19, x20, [sp]
.if \fast_tbl == 1
    adr x15, Ljsimd_huff_encode_one_block_neon_consts
.else
    adr x15, Ljsimd_huff_encode_one_block_neon_slowtbl_consts
.endif
    ldr PUT_BUFFER, [x0, #0x10]
    ldr PUT_BITSw, [x0, #0x18]
    ldrsh w12, [x2]  /* load DC coeff in w12 */
    /* prepare data */
.if \fast_tbl == 1
    ld1 {v23.16b}, [x15], #16
    ld1 {v0.16b, v1.16b, v2.16b, v3.16b}, [x15], #64
    ld1 {v4.16b, v5.16b, v6.16b, v7.16b}, [x15], #64
    ld1 {v16.16b, v17.16b, v18.16b, v19.16b}, [x15], #64
    ld1 {v24.16b, v25.16b, v26.16b, v27.16b}, [x2], #64
    ld1 {v28.16b, v29.16b, v30.16b, v31.16b}, [x2], #64
    sub w12, w12, w3  /* last_dc_val, not used afterwards */
    /* ZigZag 8x8 */
    tbl v0.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v0.16b
    tbl v1.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v1.16b
    tbl v2.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v2.16b
    tbl v3.16b, {v24.16b, v25.16b, v26.16b, v27.16b}, v3.16b
    tbl v4.16b, {v28.16b, v29.16b, v30.16b, v31.16b}, v4.16b
    tbl v5.16b, {v25.16b, v26.16b, v27.16b, v28.16b}, v5.16b
    tbl v6.16b, {v27.16b, v28.16b, v29.16b, v30.16b}, v6.16b
    tbl v7.16b, {v29.16b, v30.16b, v31.16b}, v7.16b
    ins v0.h[0], w12
    tbx v1.16b, {v28.16b}, v16.16b
    tbx v2.16b, {v29.16b, v30.16b}, v17.16b
    tbx v5.16b, {v29.16b, v30.16b}, v18.16b
    tbx v6.16b, {v31.16b}, v19.16b
.else
    add x13, x2, #0x22
    sub w12, w12, w3  /* last_dc_val, not used afterwards */
    ld1 {v23.16b}, [x15]
    add x14, x2, #0x18
    add x3, x2, #0x36
    ins v0.h[0], w12
    add x9, x2, #0x2
    ld1 {v1.h}[0], [x13]
    add x15, x2, #0x30
    ld1 {v2.h}[0], [x14]
    add x19, x2, #0x26
    ld1 {v3.h}[0], [x3]
    add x20, x2, #0x28
    ld1 {v0.h}[1], [x9]
    add x12, x2, #0x10
    ld1 {v1.h}[1], [x15]
    add x13, x2, #0x40
    ld1 {v2.h}[1], [x19]
    add x14, x2, #0x34
    ld1 {v3.h}[1], [x20]
    add x3, x2, #0x1a
    ld1 {v0.h}[2], [x12]
    add x9, x2, #0x20
    ld1 {v1.h}[2], [x13]
    add x15, x2, #0x32
    ld1 {v2.h}[2], [x14]
    add x19, x2, #0x42
    ld1 {v3.h}[2], [x3]
    add x20, x2, #0xc
    ld1 {v0.h}[3], [x9]
    add x12, x2, #0x12
    ld1 {v1.h}[3], [x15]
    add x13, x2, #0x24
    ld1 {v2.h}[3], [x19]
    add x14, x2, #0x50
    ld1 {v3.h}[3], [x20]
    add x3, x2, #0xe
    ld1 {v0.h}[4], [x12]
    add x9, x2, #0x4
    ld1 {v1.h}[4], [x13]
    add x15, x2, #0x16
    ld1 {v2.h}[4], [x14]
    add x19, x2, #0x60
    ld1 {v3.h}[4], [x3]
    add x20, x2, #0x1c
    ld1 {v0.h}[5], [x9]
    add x12, x2, #0x6
    ld1 {v1.h}[5], [x15]
    add x13, x2, #0x8
    ld1 {v2.h}[5], [x19]
    add x14, x2, #0x52
    ld1 {v3.h}[5], [x20]
    add x3, x2, #0x2a
    ld1 {v0.h}[6], [x12]
    add x9, x2, #0x14
    ld1 {v1.h}[6], [x13]
    add x15, x2, #0xa
    ld1 {v2.h}[6], [x14]
    add x19, x2, #0x44
    ld1 {v3.h}[6], [x3]
    add x20, x2, #0x38
    ld1 {v0.h}[7], [x9]
    add x12, x2, #0x46
    ld1 {v1.h}[7], [x15]
    add x13, x2, #0x3a
    ld1 {v2.h}[7], [x19]
    add x14, x2, #0x74
    ld1 {v3.h}[7], [x20]
    add x3, x2, #0x6a
    ld1 {v4.h}[0], [x12]
    add x9, x2, #0x54
    ld1 {v5.h}[0], [x13]
    add x15, x2, #0x2c
    ld1 {v6.h}[0], [x14]
    add x19, x2, #0x76
    ld1 {v7.h}[0], [x3]
    add x20, x2, #0x78
    ld1 {v4.h}[1], [x9]
    add x12, x2, #0x62
    ld1 {v5.h}[1], [x15]
    add x13, x2, #0x1e
    ld1 {v6.h}[1], [x19]
    add x14, x2, #0x68
    ld1 {v7.h}[1], [x20]
    add x3, x2, #0x7a
    ld1 {v4.h}[2], [x12]
    add x9, x2, #0x70
    ld1 {v5.h}[2], [x13]
    add x15, x2, #0x2e
    ld1 {v6.h}[2], [x14]
    add x19, x2, #0x5a
    ld1 {v7.h}[2], [x3]
    add x20, x2, #0x6c
    ld1 {v4.h}[3], [x9]
    add x12, x2, #0x72
    ld1 {v5.h}[3], [x15]
    add x13, x2, #0x3c
    ld1 {v6.h}[3], [x19]
    add x14, x2, #0x4c
    ld1 {v7.h}[3], [x20]
    add x3, x2, #0x5e
    ld1 {v4.h}[4], [x12]
    add x9, x2, #0x64
    ld1 {v5.h}[4], [x13]
    add x15, x2, #0x4a
    ld1 {v6.h}[4], [x14]
    add x19, x2, #0x3e
    ld1 {v7.h}[4], [x3]
    add x20, x2, #0x6e
    ld1 {v4.h}[5], [x9]
    add x12, x2, #0x56
    ld1 {v5.h}[5], [x15]
    add x13, x2, #0x58
    ld1 {v6.h}[5], [x19]
    add x14, x2, #0x4e
    ld1 {v7.h}[5], [x20]
    add x3, x2, #0x7c
    ld1 {v4.h}[6], [x12]
    add x9, x2, #0x48
    ld1 {v5.h}[6], [x13]
    add x15, x2, #0x66
    ld1 {v6.h}[6], [x14]
    add x19, x2, #0x5c
    ld1 {v7.h}[6], [x3]
    add x20, x2, #0x7e
    ld1 {v4.h}[7], [x9]
    ld1 {v5.h}[7], [x15]
    ld1 {v6.h}[7], [x19]
    ld1 {v7.h}[7], [x20]
.endif
    cmlt v24.8h, v0.8h, #0
    cmlt v25.8h, v1.8h, #0
    cmlt v26.8h, v2.8h, #0
    cmlt v27.8h, v3.8h, #0
    cmlt v28.8h, v4.8h, #0
    cmlt v29.8h, v5.8h, #0
    cmlt v30.8h, v6.8h, #0
    cmlt v31.8h, v7.8h, #0
    abs v0.8h, v0.8h
    abs v1.8h, v1.8h
    abs v2.8h, v2.8h
    abs v3.8h, v3.8h
    abs v4.8h, v4.8h
    abs v5.8h, v5.8h
    abs v6.8h, v6.8h
    abs v7.8h, v7.8h
    eor v24.16b, v24.16b, v0.16b
    eor v25.16b, v25.16b, v1.16b
    eor v26.16b, v26.16b, v2.16b
    eor v27.16b, v27.16b, v3.16b
    eor v28.16b, v28.16b, v4.16b
    eor v29.16b, v29.16b, v5.16b
    eor v30.16b, v30.16b, v6.16b
    eor v31.16b, v31.16b, v7.16b
    cmeq v16.8h, v0.8h, #0
    cmeq v17.8h, v1.8h, #0
    cmeq v18.8h, v2.8h, #0
    cmeq v19.8h, v3.8h, #0
    cmeq v20.8h, v4.8h, #0
    cmeq v21.8h, v5.8h, #0
    cmeq v22.8h, v6.8h, #0
    xtn v16.8b, v16.8h
    xtn v18.8b, v18.8h
    xtn v20.8b, v20.8h
    xtn v22.8b, v22.8h
    umov w14, v0.h[0]
    xtn2 v16.16b, v17.8h
    umov w13, v24.h[0]
    xtn2 v18.16b, v19.8h
    clz w14, w14
    xtn2 v20.16b, v21.8h
    lsl w13, w13, w14
    cmeq v17.8h, v7.8h, #0
    sub w12, w14, #32
    xtn2 v22.16b, v17.8h
    lsr w13, w13, w14
    and v16.16b, v16.16b, v23.16b
    neg w12, w12
    and v18.16b, v18.16b, v23.16b
    add x3, x4, #0x400  /* x3 = dctbl->ehufsi */
    and v20.16b, v20.16b, v23.16b
    add x15, sp, #0x90  /* x15 = t2 */
    and v22.16b, v22.16b, v23.16b
    ldr w10, [x4, x12, lsl #2]
    addp v16.16b, v16.16b, v18.16b
    ldrb w11, [x3, x12]
    addp v20.16b, v20.16b, v22.16b
    checkbuf47
    addp v16.16b, v16.16b, v20.16b
    put_bits x10, x11
    addp v16.16b, v16.16b, v18.16b
    checkbuf47
    umov x9, v16.D[0]
    put_bits x13, x12
    cnt v17.8b, v16.8b
    mvn x9, x9
    addv B18, v17.8b
    add x4, x5, #0x400  /* x4 = actbl->ehufsi */
    umov w12, v18.b[0]
    lsr x9, x9, #0x1  /* clear AC coeff */
    ldr w13, [x5, #0x3c0]  /* x13 = actbl->ehufco[0xf0] */
    rbit x9, x9  /* x9 = index0 */
    ldrb w14, [x4, #0xf0]  /* x14 = actbl->ehufsi[0xf0] */
    cmp w12, #(64-8)
    add x11, sp, #16
    b.lt 4f
    cbz x9, 6f
    st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
    st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
    st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
    st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
1:
    clz x2, x9
    add x15, x15, x2, lsl #1
    lsl x9, x9, x2
    ldrh w20, [x15, #-126]
2:
    cmp x2, #0x10
    b.lt 3f
    sub x2, x2, #0x10
    checkbuf47
    put_bits x13, x14
    b 2b
3:
    clz w20, w20
    ldrh w3, [x15, #2]!
    sub w11, w20, #32
    lsl w3, w3, w20
    neg w11, w11
    lsr w3, w3, w20
    add x2, x11, x2, lsl #4
    lsl x9, x9, #0x1
    ldr w12, [x5, x2, lsl #2]
    ldrb w10, [x4, x2]
    checkbuf31
    put_bits x12, x10
    put_bits x3, x11
    cbnz x9, 1b
    b 6f
4:
    movi v21.8h, #0x0010
    clz v0.8h, v0.8h
    clz v1.8h, v1.8h
    clz v2.8h, v2.8h
    clz v3.8h, v3.8h
    clz v4.8h, v4.8h
    clz v5.8h, v5.8h
    clz v6.8h, v6.8h
    clz v7.8h, v7.8h
    ushl v24.8h, v24.8h, v0.8h
    ushl v25.8h, v25.8h, v1.8h
    ushl v26.8h, v26.8h, v2.8h
    ushl v27.8h, v27.8h, v3.8h
    ushl v28.8h, v28.8h, v4.8h
    ushl v29.8h, v29.8h, v5.8h
    ushl v30.8h, v30.8h, v6.8h
    ushl v31.8h, v31.8h, v7.8h
    neg v0.8h, v0.8h
    neg v1.8h, v1.8h
    neg v2.8h, v2.8h
    neg v3.8h, v3.8h
    neg v4.8h, v4.8h
    neg v5.8h, v5.8h
    neg v6.8h, v6.8h
    neg v7.8h, v7.8h
    ushl v24.8h, v24.8h, v0.8h
    ushl v25.8h, v25.8h, v1.8h
    ushl v26.8h, v26.8h, v2.8h
    ushl v27.8h, v27.8h, v3.8h
    ushl v28.8h, v28.8h, v4.8h
    ushl v29.8h, v29.8h, v5.8h
    ushl v30.8h, v30.8h, v6.8h
    ushl v31.8h, v31.8h, v7.8h
    add v0.8h, v21.8h, v0.8h
    add v1.8h, v21.8h, v1.8h
    add v2.8h, v21.8h, v2.8h
    add v3.8h, v21.8h, v3.8h
    add v4.8h, v21.8h, v4.8h
    add v5.8h, v21.8h, v5.8h
    add v6.8h, v21.8h, v6.8h
    add v7.8h, v21.8h, v7.8h
    st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [x11], #64
    st1 {v4.8h, v5.8h, v6.8h, v7.8h}, [x11], #64
    st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [x11], #64
    st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [x11], #64
1:
    clz x2, x9
    add x15, x15, x2, lsl #1
    lsl x9, x9, x2
    ldrh w11, [x15, #-126]
2:
    cmp x2, #0x10
    b.lt 3f
    sub x2, x2, #0x10
    checkbuf47
    put_bits x13, x14
    b 2b
3:
    ldrh w3, [x15, #2]!
    add x2, x11, x2, lsl #4
    lsl x9, x9, #0x1
    ldr w12, [x5, x2, lsl #2]
    ldrb w10, [x4, x2]
    checkbuf31
    put_bits x12, x10
    put_bits x3, x11
    cbnz x9, 1b
6:
    add x13, sp, #0x10e
    cmp x15, x13
    b.hs 1f
    ldr w12, [x5]
    ldrb w14, [x4]
    checkbuf47
    put_bits x12, x14
1:
    str PUT_BUFFER, [x0, #0x10]
    str PUT_BITSw, [x0, #0x18]
    ldp x19, x20, [sp], 16
    add x0, BUFFER, #0x1
    add sp, sp, 256
    br x30
.endm

generate_jsimd_huff_encode_one_block 1
generate_jsimd_huff_encode_one_block 0

    .unreq BUFFER
    .unreq PUT_BUFFER
    .unreq PUT_BITS
    .unreq PUT_BITSw
.purgem emit_byte
.purgem put_bits
.purgem checkbuf31
.purgem checkbuf47