ProcessRGB.cpp

  1. #include <array>
  2. #include <string.h>
  3. #include <limits>
  4. #ifdef __ARM_NEON
  5. # include <arm_neon.h>
  6. #endif
  7. #include "Dither.hpp"
  8. #include "ForceInline.hpp"
  9. #include "Math.hpp"
  10. #include "ProcessCommon.hpp"
  11. #include "ProcessRGB.hpp"
  12. #include "Tables.hpp"
  13. #include "Vector.hpp"
  14. #if defined __SSE4_1__ || defined __AVX2__ || defined _MSC_VER
  15. # ifdef _MSC_VER
  16. # include <intrin.h>
  17. # include <Windows.h>
  18. # define _bswap(x) _byteswap_ulong(x)
  19. # define _bswap64(x) _byteswap_uint64(x)
  20. # else
  21. # include <x86intrin.h>
  22. # endif
  23. #endif
  24. #ifndef _bswap
  25. # define _bswap(x) __builtin_bswap32(x)
  26. # define _bswap64(x) __builtin_bswap64(x)
  27. #endif
  28. static const uint32_t MaxError = 1065369600; // ((38+76+14) * 255)^2
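// 38, 76 and 14 sum to 128 and roughly correspond to the 0.30/0.59/0.11 luma weights scaled
// by 128; MaxError is the largest weighted error a single pixel can contribute, squared.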
  29. // common T-/H-mode table
  30. static uint8_t tableTH[8] = { 3, 6, 11, 16, 23, 32, 41, 64 };
  31. // thresholds for the early compression-mode decision scheme
  32. // default: 0.03, 0.09, and 0.38
  33. float ecmd_threshold[3] = { 0.03f, 0.09f, 0.38f };
  34. static const uint8_t ModeUndecided = 0;
  35. static const uint8_t ModePlanar = 0x1;
  36. static const uint8_t ModeTH = 0x2;
  37. const unsigned int R = 2;
  38. const unsigned int G = 1;
  39. const unsigned int B = 0;
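// Channel indices into the uint8_t[..][3] color arrays used by the T-/H-mode helpers below.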
  40. struct Luma
  41. {
  42. #ifdef __AVX2__
  43. float max, min;
  44. uint8_t minIdx = 255, maxIdx = 255;
  45. __m128i luma8;
  46. #elif defined __ARM_NEON && defined __aarch64__
  47. float max, min;
  48. uint8_t minIdx = 255, maxIdx = 255;
  49. uint8x16_t luma8;
  50. #else
  51. uint8_t max = 0, min = 255, maxIdx = 0, minIdx = 0;
  52. uint8_t val[16];
  53. #endif
  54. };
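// Per-block luma statistics (extremes, their pixel indices and, depending on the code path,
// the 16 per-pixel luma values) used by the compression-mode selection heuristics.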
  55. #ifdef __AVX2__
  56. struct Plane
  57. {
  58. uint64_t plane;
  59. uint64_t error;
  60. __m256i sum4;
  61. };
  62. #endif
  63. #if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
  64. struct Channels
  65. {
  66. #ifdef __AVX2__
  67. __m128i r8, g8, b8;
  68. #elif defined __ARM_NEON && defined __aarch64__
  69. uint8x16x2_t r, g, b;
  70. #endif
  71. };
  72. #endif
  73. namespace
  74. {
  75. static etcpak_force_inline uint8_t clamp( uint8_t min, int16_t val, uint8_t max )
  76. {
  77. return val < min ? min : ( val > max ? max : val );
  78. }
  79. static etcpak_force_inline uint8_t clampMin( uint8_t min, int16_t val )
  80. {
  81. return val < min ? min : val;
  82. }
  83. static etcpak_force_inline uint8_t clampMax( int16_t val, uint8_t max )
  84. {
  85. return val > max ? max : val;
  86. }
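// Sorts arr1 in ascending order while keeping arr2 in step; when the value from slot i is
// inserted, i is written to the matching slot of arr2, so the sorted order can be mapped
// back to the original positions.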
  87. // slightly faster than std::sort
  88. static void insertionSort( uint8_t* arr1, uint8_t* arr2 )
  89. {
  90. for( uint8_t i = 1; i < 16; ++i )
  91. {
  92. uint8_t value = arr1[i];
  93. uint8_t hole = i;
  94. for( ; hole > 0 && value < arr1[hole - 1]; --hole )
  95. {
  96. arr1[hole] = arr1[hole - 1];
  97. arr2[hole] = arr2[hole - 1];
  98. }
  99. arr1[hole] = value;
  100. arr2[hole] = i;
  101. }
  102. }
  103. //converts indices from |a0|a1|e0|e1|i0|i1|m0|m1|b0|b1|f0|f1|j0|j1|n0|n1|c0|c1|g0|g1|k0|k1|o0|o1|d0|d1|h0|h1|l0|l1|p0|p1| previously used by T- and H-modes
  104. // into |p0|o0|n0|m0|l0|k0|j0|i0|h0|g0|f0|e0|d0|c0|b0|a0|p1|o1|n1|m1|l1|k1|j1|i1|h1|g1|f1|e1|d1|c1|b1|a1| which should be used for all modes.
  105. // NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
  106. static etcpak_force_inline int indexConversion( int pixelIndices )
  107. {
  108. int correctIndices = 0;
  109. int LSB[4][4];
  110. int MSB[4][4];
  111. int shift = 0;
  112. for( int y = 3; y >= 0; y-- )
  113. {
  114. for( int x = 3; x >= 0; x-- )
  115. {
  116. LSB[x][y] = ( pixelIndices >> shift ) & 1;
  117. shift++;
  118. MSB[x][y] = ( pixelIndices >> shift ) & 1;
  119. shift++;
  120. }
  121. }
  122. shift = 0;
  123. for( int x = 0; x < 4; x++ )
  124. {
  125. for( int y = 0; y < 4; y++ )
  126. {
  127. correctIndices |= ( LSB[x][y] << shift );
  128. correctIndices |= ( MSB[x][y] << ( 16 + shift ) );
  129. shift++;
  130. }
  131. }
  132. return correctIndices;
  133. }
  134. // Swapping two RGB-colors
  135. // NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
  136. static etcpak_force_inline void swapColors( uint8_t( colors )[2][3] )
  137. {
  138. uint8_t temp = colors[0][R];
  139. colors[0][R] = colors[1][R];
  140. colors[1][R] = temp;
  141. temp = colors[0][G];
  142. colors[0][G] = colors[1][G];
  143. colors[1][G] = temp;
  144. temp = colors[0][B];
  145. colors[0][B] = colors[1][B];
  146. colors[1][B] = temp;
  147. }
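// RGB444 quantization used below: q = 15 * (c + 8) / 255 maps 0..255 to 0..15 with rounding,
// e.g. c = 128 gives 15 * 136 / 255 = 8, which decompressColor() expands back to (8 << 4) | 8 = 136.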
  148. // calculates quantized colors for T or H modes
  149. void compressColor( uint8_t( currColor )[2][3], uint8_t( quantColor )[2][3], bool t_mode )
  150. {
  151. if( t_mode )
  152. {
  153. quantColor[0][R] = clampMax( 15 * ( currColor[0][R] + 8 ) / 255, 15 );
  154. quantColor[0][G] = clampMax( 15 * ( currColor[0][G] + 8 ) / 255, 15 );
  155. quantColor[0][B] = clampMax( 15 * ( currColor[0][B] + 8 ) / 255, 15 );
  156. }
  157. else // clamped to [1,14] to get a wider range
  158. {
  159. quantColor[0][R] = clamp( 1, 15 * ( currColor[0][R] + 8 ) / 255, 14 );
  160. quantColor[0][G] = clamp( 1, 15 * ( currColor[0][G] + 8 ) / 255, 14 );
  161. quantColor[0][B] = clamp( 1, 15 * ( currColor[0][B] + 8 ) / 255, 14 );
  162. }
  163. // clamped to [1,14] to get a wider range
  164. quantColor[1][R] = clamp( 1, 15 * ( currColor[1][R] + 8 ) / 255, 14 );
  165. quantColor[1][G] = clamp( 1, 15 * ( currColor[1][G] + 8 ) / 255, 14 );
  166. quantColor[1][B] = clamp( 1, 15 * ( currColor[1][B] + 8 ) / 255, 14 );
  167. }
  168. // The three decoding functions below come from ETCPACK v2.74 and are slightly changed.
  169. static etcpak_force_inline void decompressColor( uint8_t( colorsRGB444 )[2][3], uint8_t( colors )[2][3] )
  170. {
  171. // The color should be retrieved as:
  172. //
  173. // c = round(255/(2^r_bits-1))*comp_color
  174. //
  175. // This is similar to bit replication
  176. //
  177. // Note -- this code only works for bit replication from 4 bits and up --- 3 bits needs
  178. // two copy operations.
  179. colors[0][R] = ( colorsRGB444[0][R] << 4 ) | colorsRGB444[0][R];
  180. colors[0][G] = ( colorsRGB444[0][G] << 4 ) | colorsRGB444[0][G];
  181. colors[0][B] = ( colorsRGB444[0][B] << 4 ) | colorsRGB444[0][B];
  182. colors[1][R] = ( colorsRGB444[1][R] << 4 ) | colorsRGB444[1][R];
  183. colors[1][G] = ( colorsRGB444[1][G] << 4 ) | colorsRGB444[1][G];
  184. colors[1][B] = ( colorsRGB444[1][B] << 4 ) | colorsRGB444[1][B];
  185. }
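// T mode: the first base color is used as-is, while the second contributes three paint
// colors: itself and itself offset by +/- tableTH[d].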
  186. // calculates the paint colors from the block colors
  187. // using a distance d and one of the H- or T-patterns.
  188. static void calculatePaintColors59T( uint8_t d, uint8_t( colors )[2][3], uint8_t( pColors )[4][3] )
  189. {
  190. //////////////////////////////////////////////
  191. //
  192. //     C3      C1         C4----C1---C2
  193. //     |       |                |
  194. //     |       |                |
  195. //     |-------|                |
  196. //     |       |                |
  197. //     |       |                |
  198. //     C4      C2               C3
  199. //
  200. //////////////////////////////////////////////
  201. // C4
  202. pColors[3][R] = clampMin( 0, colors[1][R] - tableTH[d] );
  203. pColors[3][G] = clampMin( 0, colors[1][G] - tableTH[d] );
  204. pColors[3][B] = clampMin( 0, colors[1][B] - tableTH[d] );
  205. // C3
  206. pColors[0][R] = colors[0][R];
  207. pColors[0][G] = colors[0][G];
  208. pColors[0][B] = colors[0][B];
  209. // C2
  210. pColors[1][R] = clampMax( colors[1][R] + tableTH[d], 255 );
  211. pColors[1][G] = clampMax( colors[1][G] + tableTH[d], 255 );
  212. pColors[1][B] = clampMax( colors[1][B] + tableTH[d], 255 );
  213. // C1
  214. pColors[2][R] = colors[1][R];
  215. pColors[2][G] = colors[1][G];
  216. pColors[2][B] = colors[1][B];
  217. }
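// H mode: each of the two base colors is offset by +/- tableTH[d], giving the four paint colors.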
  218. static void calculatePaintColors58H( uint8_t d, uint8_t( colors )[2][3], uint8_t( pColors )[4][3] )
  219. {
  220. pColors[3][R] = clampMin( 0, colors[1][R] - tableTH[d] );
  221. pColors[3][G] = clampMin( 0, colors[1][G] - tableTH[d] );
  222. pColors[3][B] = clampMin( 0, colors[1][B] - tableTH[d] );
  223. // C1
  224. pColors[0][R] = clampMax( colors[0][R] + tableTH[d], 255 );
  225. pColors[0][G] = clampMax( colors[0][G] + tableTH[d], 255 );
  226. pColors[0][B] = clampMax( colors[0][B] + tableTH[d], 255 );
  227. // C2
  228. pColors[1][R] = clampMin( 0, colors[0][R] - tableTH[d] );
  229. pColors[1][G] = clampMin( 0, colors[0][G] - tableTH[d] );
  230. pColors[1][B] = clampMin( 0, colors[0][B] - tableTH[d] );
  231. // C3
  232. pColors[2][R] = clampMax( colors[1][R] + tableTH[d], 255 );
  233. pColors[2][G] = clampMax( colors[1][G] + tableTH[d], 255 );
  234. pColors[2][B] = clampMax( colors[1][B] + tableTH[d], 255 );
  235. }
  236. #if defined _MSC_VER && !defined __clang__
  237. static etcpak_force_inline unsigned long _bit_scan_forward( unsigned long mask )
  238. {
  239. unsigned long ret;
  240. _BitScanForward( &ret, mask );
  241. return ret;
  242. }
  243. #endif
  244. typedef std::array<uint16_t, 4> v4i;
  245. #ifdef __AVX2__
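// Sums the RGB channels of the 4x4 source block (alpha is masked off) and returns the
// per-channel sums of the four candidate half-blocks packed as 16-bit values.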
  246. static etcpak_force_inline __m256i Sum4_AVX2( const uint8_t* data) noexcept
  247. {
  248. __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
  249. __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
  250. __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
  251. __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
  252. __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
  253. __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
  254. __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
  255. __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));
  256. __m256i t0 = _mm256_cvtepu8_epi16(dm0);
  257. __m256i t1 = _mm256_cvtepu8_epi16(dm1);
  258. __m256i t2 = _mm256_cvtepu8_epi16(dm2);
  259. __m256i t3 = _mm256_cvtepu8_epi16(dm3);
  260. __m256i sum0 = _mm256_add_epi16(t0, t1);
  261. __m256i sum1 = _mm256_add_epi16(t2, t3);
  262. __m256i s0 = _mm256_permute2x128_si256(sum0, sum1, (0) | (3 << 4)); // 0, 0, 3, 3
  263. __m256i s1 = _mm256_permute2x128_si256(sum0, sum1, (1) | (2 << 4)); // 1, 1, 2, 2
  264. __m256i s2 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(1, 3, 0, 2));
  265. __m256i s3 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(0, 2, 1, 3));
  266. __m256i s4 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(3, 1, 0, 2));
  267. __m256i s5 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(2, 0, 1, 3));
  268. __m256i sum5 = _mm256_add_epi16(s2, s3); // 3, 0, 3, 0
  269. __m256i sum6 = _mm256_add_epi16(s4, s5); // 2, 1, 1, 2
  270. return _mm256_add_epi16(sum5, sum6); // 3+2, 0+1, 3+1, 0+2
  271. }
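// (sum + 4) >> 3 is the rounded average of the 8 pixels in a half-block,
// e.g. a channel sum of 1020 averages to (1020 + 4) >> 3 = 128.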
  272. static etcpak_force_inline __m256i Average_AVX2( const __m256i data) noexcept
  273. {
  274. __m256i a = _mm256_add_epi16(data, _mm256_set1_epi16(4));
  275. return _mm256_srli_epi16(a, 3);
  276. }
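// For a half-block, the squared error against an average a is
//   sum((p - a)^2) = sum(p^2) - 2 * a * sum(p) + 8 * a^2;
// sum(p^2) is identical for every candidate average, so only the last two terms are computed
// here, with a large constant added to keep the intermediate values non-negative.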
  277. static etcpak_force_inline __m128i CalcErrorBlock_AVX2( const __m256i data, const v4i a[8]) noexcept
  278. {
  279. //
  280. __m256i a0 = _mm256_load_si256((__m256i*)a[0].data());
  281. __m256i a1 = _mm256_load_si256((__m256i*)a[4].data());
  282. // err = 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
  283. __m256i a4 = _mm256_madd_epi16(a0, a0);
  284. __m256i a5 = _mm256_madd_epi16(a1, a1);
  285. __m256i a6 = _mm256_hadd_epi32(a4, a5);
  286. __m256i a7 = _mm256_slli_epi32(a6, 3);
  287. __m256i a8 = _mm256_add_epi32(a7, _mm256_set1_epi32(0x3FFFFFFF)); // Big value to prevent negative values, but small enough to prevent overflow
  288. // average is not swapped
  289. // err -= block[0] * 2 * average[0];
  290. // err -= block[1] * 2 * average[1];
  291. // err -= block[2] * 2 * average[2];
  292. __m256i a2 = _mm256_slli_epi16(a0, 1);
  293. __m256i a3 = _mm256_slli_epi16(a1, 1);
  294. __m256i b0 = _mm256_madd_epi16(a2, data);
  295. __m256i b1 = _mm256_madd_epi16(a3, data);
  296. __m256i b2 = _mm256_hadd_epi32(b0, b1);
  297. __m256i b3 = _mm256_sub_epi32(a8, b2);
  298. __m256i b4 = _mm256_hadd_epi32(b3, b3);
  299. __m256i b5 = _mm256_permutevar8x32_epi32(b4, _mm256_set_epi32(0, 0, 0, 0, 5, 1, 4, 0));
  300. return _mm256_castsi256_si128(b5);
  301. }
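// Quantizes the half-block averages to RGB555 (differential mode, with the delta between the
// two sub-blocks clamped to the encodable [-4,3] range) and to RGB444 (individual mode), then
// expands both back to 8 bits for the error evaluation; the results go to a[4..7] and a[0..3].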
  302. static etcpak_force_inline void ProcessAverages_AVX2(const __m256i d, v4i a[8] ) noexcept
  303. {
  304. __m256i t = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(31)), _mm256_set1_epi16(128));
  305. __m256i c = _mm256_srli_epi16(_mm256_add_epi16(t, _mm256_srli_epi16(t, 8)), 8);
  306. __m256i c1 = _mm256_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
  307. __m256i diff = _mm256_sub_epi16(c, c1);
  308. diff = _mm256_max_epi16(diff, _mm256_set1_epi16(-4));
  309. diff = _mm256_min_epi16(diff, _mm256_set1_epi16(3));
  310. __m256i co = _mm256_add_epi16(c1, diff);
  311. c = _mm256_blend_epi16(co, c, 0xF0);
  312. __m256i a0 = _mm256_or_si256(_mm256_slli_epi16(c, 3), _mm256_srli_epi16(c, 2));
  313. _mm256_store_si256((__m256i*)a[4].data(), a0);
  314. __m256i t0 = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(15)), _mm256_set1_epi16(128));
  315. __m256i t1 = _mm256_srli_epi16(_mm256_add_epi16(t0, _mm256_srli_epi16(t0, 8)), 8);
  316. __m256i t2 = _mm256_or_si256(t1, _mm256_slli_epi16(t1, 4));
  317. _mm256_store_si256((__m256i*)a[0].data(), t2);
  318. }
  319. static etcpak_force_inline uint64_t EncodeAverages_AVX2( const v4i a[8], size_t idx ) noexcept
  320. {
  321. uint64_t d = ( idx << 24 );
  322. size_t base = idx << 1;
  323. __m128i a0 = _mm_load_si128((const __m128i*)a[base].data());
  324. __m128i r0, r1;
  325. if( ( idx & 0x2 ) == 0 )
  326. {
  327. r0 = _mm_srli_epi16(a0, 4);
  328. __m128i a1 = _mm_unpackhi_epi64(r0, r0);
  329. r1 = _mm_slli_epi16(a1, 4);
  330. }
  331. else
  332. {
  333. __m128i a1 = _mm_and_si128(a0, _mm_set1_epi16(-8));
  334. r0 = _mm_unpackhi_epi64(a1, a1);
  335. __m128i a2 = _mm_sub_epi16(a1, r0);
  336. __m128i a3 = _mm_srai_epi16(a2, 3);
  337. r1 = _mm_and_si128(a3, _mm_set1_epi16(0x07));
  338. }
  339. __m128i r2 = _mm_or_si128(r0, r1);
  340. // do missing swap for average values
  341. __m128i r3 = _mm_shufflelo_epi16(r2, _MM_SHUFFLE(3, 0, 1, 2));
  342. __m128i r4 = _mm_packus_epi16(r3, _mm_setzero_si128());
  343. d |= _mm_cvtsi128_si32(r4);
  344. return d;
  345. }
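// Fast path: if all 16 pixels are identical, the block can be emitted immediately with the
// color quantized to 5 bits per channel (the 0xF8 masks); returns 0 when the block is not
// uniform so that the full search runs.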
  346. static etcpak_force_inline uint64_t CheckSolid_AVX2( const uint8_t* src ) noexcept
  347. {
  348. __m256i d0 = _mm256_loadu_si256(((__m256i*)src) + 0);
  349. __m256i d1 = _mm256_loadu_si256(((__m256i*)src) + 1);
  350. __m256i c = _mm256_broadcastd_epi32(_mm256_castsi256_si128(d0));
  351. __m256i c0 = _mm256_cmpeq_epi8(d0, c);
  352. __m256i c1 = _mm256_cmpeq_epi8(d1, c);
  353. __m256i m = _mm256_and_si256(c0, c1);
  354. if (!_mm256_testc_si256(m, _mm256_set1_epi32(-1)))
  355. {
  356. return 0;
  357. }
  358. return 0x02000000 |
  359. ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
  360. ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
  361. ( (unsigned int)( src[2] & 0xF8 ) );
  362. }
  363. static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const uint8_t* src) noexcept
  364. {
  365. __m256i sum4 = Sum4_AVX2( src );
  366. ProcessAverages_AVX2(Average_AVX2( sum4 ), a );
  367. return CalcErrorBlock_AVX2( sum4, a);
  368. }
  369. static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const __m256i sum4) noexcept
  370. {
  371. ProcessAverages_AVX2(Average_AVX2( sum4 ), a );
  372. return CalcErrorBlock_AVX2( sum4, a);
  373. }
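// For each of the two 4x2 sub-blocks, evaluates all 8 ETC1 modifier tables at once: the
// per-pixel difference from the sub-block average is reduced to a single weighted (luma-like)
// value and matched against the table entries; terr[sub][table] accumulates the squared error
// and tsel[table] collects the packed selector bits.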
  374. static etcpak_force_inline void FindBestFit_4x2_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
  375. {
  376. __m256i sel0 = _mm256_setzero_si256();
  377. __m256i sel1 = _mm256_setzero_si256();
  378. for (unsigned int j = 0; j < 2; ++j)
  379. {
  380. unsigned int bid = offset + 1 - j;
  381. __m256i squareErrorSum = _mm256_setzero_si256();
  382. __m128i a0 = _mm_loadl_epi64((const __m128i*)a[bid].data());
  383. __m256i a1 = _mm256_broadcastq_epi64(a0);
  384. // Processing one full row each iteration
  385. for (size_t i = 0; i < 8; i += 4)
  386. {
  387. __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));
  388. __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
  389. __m256i d = _mm256_sub_epi16(a1, rgb16);
  390. // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
  391. // This produces slightly different results, but is significantly faster
  392. __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
  393. __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
  394. __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
  395. __m128i pixel3 = _mm256_castsi256_si128(pixel2);
  396. __m128i pix0 = _mm_broadcastw_epi16(pixel3);
  397. __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
  398. __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
  399. // Processing first two pixels of the row
  400. {
  401. __m256i pix = _mm256_abs_epi16(pixel);
  402. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  403. // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
  404. __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
  405. __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
  406. __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
  407. __m256i minError = _mm256_min_epi16(error0, error1);
  408. // Exploiting the symmetry of the selector table by using the sign bit
  409. // This produces slightly different results, but is significantly faster
  410. __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
  411. // Interleaving values so madd instruction can be used
  412. __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
  413. __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
  414. __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
  415. // Squaring the minimum error to produce correct values when adding
  416. __m256i squareError = _mm256_madd_epi16(minError2, minError2);
  417. squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);
  418. // Packing selector bits
  419. __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
  420. __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));
  421. sel0 = _mm256_or_si256(sel0, minIndexLo2);
  422. sel1 = _mm256_or_si256(sel1, minIndexHi2);
  423. }
  424. pixel3 = _mm256_extracti128_si256(pixel2, 1);
  425. pix0 = _mm_broadcastw_epi16(pixel3);
  426. pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
  427. pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
  428. // Processing second two pixels of the row
  429. {
  430. __m256i pix = _mm256_abs_epi16(pixel);
  431. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  432. // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
  433. __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
  434. __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
  435. __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
  436. __m256i minError = _mm256_min_epi16(error0, error1);
  437. // Exploiting the symmetry of the selector table by using the sign bit
  438. __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
  439. // Interleaving values so madd instruction can be used
  440. __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
  441. __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
  442. __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
  443. // Squaring the minimum error to produce correct values when adding
  444. __m256i squareError = _mm256_madd_epi16(minError2, minError2);
  445. squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);
  446. // Packing selector bits
  447. __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
  448. __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));
  449. __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
  450. __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);
  451. sel0 = _mm256_or_si256(sel0, minIndexLo3);
  452. sel1 = _mm256_or_si256(sel1, minIndexHi3);
  453. }
  454. }
  455. data += 8 * 4;
  456. _mm256_store_si256((__m256i*)terr[1 - j], squareErrorSum);
  457. }
  458. // Interleave selector bits
  459. __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
  460. __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);
  461. __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
  462. __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));
  463. __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);
  464. __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);
  465. _mm256_store_si256((__m256i*)tsel, sel);
  466. }
  467. static etcpak_force_inline void FindBestFit_2x4_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
  468. {
  469. __m256i sel0 = _mm256_setzero_si256();
  470. __m256i sel1 = _mm256_setzero_si256();
  471. __m256i squareErrorSum0 = _mm256_setzero_si256();
  472. __m256i squareErrorSum1 = _mm256_setzero_si256();
  473. __m128i a0 = _mm_loadl_epi64((const __m128i*)a[offset + 1].data());
  474. __m128i a1 = _mm_loadl_epi64((const __m128i*)a[offset + 0].data());
  475. __m128i a2 = _mm_broadcastq_epi64(a0);
  476. __m128i a3 = _mm_broadcastq_epi64(a1);
  477. __m256i a4 = _mm256_insertf128_si256(_mm256_castsi128_si256(a2), a3, 1);
  478. // Processing one full row each iteration
  479. for (size_t i = 0; i < 16; i += 4)
  480. {
  481. __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));
  482. __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
  483. __m256i d = _mm256_sub_epi16(a4, rgb16);
  484. // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
  485. // This produces slightly different results, but is significantly faster
  486. __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
  487. __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
  488. __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
  489. __m128i pixel3 = _mm256_castsi256_si128(pixel2);
  490. __m128i pix0 = _mm_broadcastw_epi16(pixel3);
  491. __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
  492. __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
  493. // Processing first two pixels of the row
  494. {
  495. __m256i pix = _mm256_abs_epi16(pixel);
  496. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  497. // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
  498. __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
  499. __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
  500. __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
  501. __m256i minError = _mm256_min_epi16(error0, error1);
  502. // Exploiting the symmetry of the selector table by using the sign bit
  503. __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
  504. // Interleaving values so madd instruction can be used
  505. __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
  506. __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
  507. __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
  508. // Squaring the minimum error to produce correct values when adding
  509. __m256i squareError = _mm256_madd_epi16(minError2, minError2);
  510. squareErrorSum0 = _mm256_add_epi32(squareErrorSum0, squareError);
  511. // Packing selector bits
  512. __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
  513. __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));
  514. sel0 = _mm256_or_si256(sel0, minIndexLo2);
  515. sel1 = _mm256_or_si256(sel1, minIndexHi2);
  516. }
  517. pixel3 = _mm256_extracti128_si256(pixel2, 1);
  518. pix0 = _mm_broadcastw_epi16(pixel3);
  519. pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
  520. pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);
  521. // Processing second two pixels of the row
  522. {
  523. __m256i pix = _mm256_abs_epi16(pixel);
  524. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  525. // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
  526. __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
  527. __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));
  528. __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
  529. __m256i minError = _mm256_min_epi16(error0, error1);
  530. // Exploiting the symmetry of the selector table by using the sign bit
  531. __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);
  532. // Interleaving values so madd instruction can be used
  533. __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
  534. __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));
  535. __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
  536. // Squaring the minimum error to produce correct values when adding
  537. __m256i squareError = _mm256_madd_epi16(minError2, minError2);
  538. squareErrorSum1 = _mm256_add_epi32(squareErrorSum1, squareError);
  539. // Packing selector bits
  540. __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
  541. __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));
  542. __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
  543. __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);
  544. sel0 = _mm256_or_si256(sel0, minIndexLo3);
  545. sel1 = _mm256_or_si256(sel1, minIndexHi3);
  546. }
  547. }
  548. _mm256_store_si256((__m256i*)terr[1], squareErrorSum0);
  549. _mm256_store_si256((__m256i*)terr[0], squareErrorSum1);
  550. // Interleave selector bits
  551. __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
  552. __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);
  553. __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
  554. __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));
  555. __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);
  556. __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);
  557. _mm256_store_si256((__m256i*)tsel, sel);
  558. }
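// Picks, for each sub-block, the modifier table with the smallest accumulated error, writes
// the two 3-bit table indices into the block header and byte-swaps the packed selector bits
// into the high 32 bits of the output word.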
  559. static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate) noexcept
  560. {
  561. size_t tidx[2];
  562. // Get index of minimum error (terr[0] and terr[1])
  563. __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
  564. __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);
  565. __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
  566. __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));
  567. __m256i errMin0 = _mm256_min_epu32(errLo, errHi);
  568. __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
  569. __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);
  570. __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
  571. __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);
  572. __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
  573. __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));
  574. __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
  575. __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);
  576. uint32_t mask0 = _mm256_movemask_epi8(errMask0);
  577. uint32_t mask1 = _mm256_movemask_epi8(errMask1);
  578. tidx[0] = _bit_scan_forward(mask0) >> 2;
  579. tidx[1] = _bit_scan_forward(mask1) >> 2;
  580. d |= tidx[0] << 26;
  581. d |= tidx[1] << 29;
  582. unsigned int t0 = tsel[tidx[0]];
  583. unsigned int t1 = tsel[tidx[1]];
  584. if (!rotate)
  585. {
  586. t0 &= 0xFF00FF00;
  587. t1 &= 0x00FF00FF;
  588. }
  589. else
  590. {
  591. t0 &= 0xCCCCCCCC;
  592. t1 &= 0x33333333;
  593. }
  594. // Flip selectors from sign bit
  595. unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;
  596. return d | static_cast<uint64_t>(_bswap(t2)) << 32;
  597. }
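// Converts the fitted planar-mode colors (origin, horizontal and vertical) to the 6-7-6 bit
// per channel representation used by the planar encoding, with rounding.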
  598. static etcpak_force_inline __m128i r6g7b6_AVX2(__m128 cof, __m128 chf, __m128 cvf) noexcept
  599. {
  600. __m128i co = _mm_cvttps_epi32(cof);
  601. __m128i ch = _mm_cvttps_epi32(chf);
  602. __m128i cv = _mm_cvttps_epi32(cvf);
  603. __m128i coh = _mm_packus_epi32(co, ch);
  604. __m128i cv0 = _mm_packus_epi32(cv, _mm_setzero_si128());
  605. __m256i cohv0 = _mm256_inserti128_si256(_mm256_castsi128_si256(coh), cv0, 1);
  606. __m256i cohv1 = _mm256_min_epu16(cohv0, _mm256_set1_epi16(1023));
  607. __m256i cohv2 = _mm256_sub_epi16(cohv1, _mm256_set1_epi16(15));
  608. __m256i cohv3 = _mm256_srai_epi16(cohv2, 1);
  609. __m256i cohvrb0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(11));
  610. __m256i cohvrb1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(4));
  611. __m256i cohvg0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(9));
  612. __m256i cohvg1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(6));
  613. __m256i cohvrb2 = _mm256_srai_epi16(cohvrb0, 7);
  614. __m256i cohvrb3 = _mm256_srai_epi16(cohvrb1, 7);
  615. __m256i cohvg2 = _mm256_srai_epi16(cohvg0, 8);
  616. __m256i cohvg3 = _mm256_srai_epi16(cohvg1, 8);
  617. __m256i cohvrb4 = _mm256_sub_epi16(cohvrb0, cohvrb2);
  618. __m256i cohvrb5 = _mm256_sub_epi16(cohvrb4, cohvrb3);
  619. __m256i cohvg4 = _mm256_sub_epi16(cohvg0, cohvg2);
  620. __m256i cohvg5 = _mm256_sub_epi16(cohvg4, cohvg3);
  621. __m256i cohvrb6 = _mm256_srai_epi16(cohvrb5, 3);
  622. __m256i cohvg6 = _mm256_srai_epi16(cohvg5, 2);
  623. __m256i cohv4 = _mm256_blend_epi16(cohvg6, cohvrb6, 0x55);
  624. __m128i cohv5 = _mm_packus_epi16(_mm256_castsi256_si128(cohv4), _mm256_extracti128_si256(cohv4, 1));
  625. return _mm_shuffle_epi8(cohv5, _mm_setr_epi8(6, 5, 4, -1, 2, 1, 0, -1, 10, 9, 8, -1, -1, -1, -1, -1));
  626. }
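// Planar mode: fits each channel with a plane over the 4x4 block (a least-squares fit against
// the +/-255 / +/-85 ramp weights), derives the O/H/V corner colors, quantizes them with
// r6g7b6_AVX2 and, when heuristics are disabled, also measures the reconstruction error.
// If the early mode decision has already ruled out planar, only the channel sums needed by
// the caller are produced.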
  627. static etcpak_force_inline Plane Planar_AVX2( const Channels& ch, uint8_t& mode, bool useHeuristics )
  628. {
  629. __m128i t0 = _mm_sad_epu8( ch.r8, _mm_setzero_si128() );
  630. __m128i t1 = _mm_sad_epu8( ch.g8, _mm_setzero_si128() );
  631. __m128i t2 = _mm_sad_epu8( ch.b8, _mm_setzero_si128() );
  632. __m128i r8s = _mm_shuffle_epi8( ch.r8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );
  633. __m128i g8s = _mm_shuffle_epi8( ch.g8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );
  634. __m128i b8s = _mm_shuffle_epi8( ch.b8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );
  635. __m128i s0 = _mm_sad_epu8( r8s, _mm_setzero_si128() );
  636. __m128i s1 = _mm_sad_epu8( g8s, _mm_setzero_si128() );
  637. __m128i s2 = _mm_sad_epu8( b8s, _mm_setzero_si128() );
  638. __m256i sr0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t0 ), s0, 1 );
  639. __m256i sg0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t1 ), s1, 1 );
  640. __m256i sb0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t2 ), s2, 1 );
  641. __m256i sr1 = _mm256_slli_epi64( sr0, 32 );
  642. __m256i sg1 = _mm256_slli_epi64( sg0, 16 );
  643. __m256i srb = _mm256_or_si256( sr1, sb0 );
  644. __m256i srgb = _mm256_or_si256( srb, sg1 );
  645. if( mode != ModePlanar && useHeuristics )
  646. {
  647. Plane plane;
  648. plane.sum4 = _mm256_permute4x64_epi64( srgb, _MM_SHUFFLE( 2, 3, 0, 1 ) );
  649. return plane;
  650. }
  651. __m128i t3 = _mm_castps_si128( _mm_shuffle_ps( _mm_castsi128_ps( t0 ), _mm_castsi128_ps( t1 ), _MM_SHUFFLE( 2, 0, 2, 0 ) ) );
  652. __m128i t4 = _mm_shuffle_epi32( t2, _MM_SHUFFLE( 3, 1, 2, 0 ) );
  653. __m128i t5 = _mm_hadd_epi32( t3, t4 );
  654. __m128i t6 = _mm_shuffle_epi32( t5, _MM_SHUFFLE( 1, 1, 1, 1 ) );
  655. __m128i t7 = _mm_shuffle_epi32( t5, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  656. __m256i sr = _mm256_broadcastw_epi16( t5 );
  657. __m256i sg = _mm256_broadcastw_epi16( t6 );
  658. __m256i sb = _mm256_broadcastw_epi16( t7 );
  659. __m256i r08 = _mm256_cvtepu8_epi16( ch.r8 );
  660. __m256i g08 = _mm256_cvtepu8_epi16( ch.g8 );
  661. __m256i b08 = _mm256_cvtepu8_epi16( ch.b8 );
  662. __m256i r16 = _mm256_slli_epi16( r08, 4 );
  663. __m256i g16 = _mm256_slli_epi16( g08, 4 );
  664. __m256i b16 = _mm256_slli_epi16( b08, 4 );
  665. __m256i difR0 = _mm256_sub_epi16( r16, sr );
  666. __m256i difG0 = _mm256_sub_epi16( g16, sg );
  667. __m256i difB0 = _mm256_sub_epi16( b16, sb );
  668. __m256i difRyz = _mm256_madd_epi16( difR0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );
  669. __m256i difGyz = _mm256_madd_epi16( difG0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );
  670. __m256i difByz = _mm256_madd_epi16( difB0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );
  671. __m256i difRxz = _mm256_madd_epi16( difR0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );
  672. __m256i difGxz = _mm256_madd_epi16( difG0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );
  673. __m256i difBxz = _mm256_madd_epi16( difB0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );
  674. __m256i difRGyz = _mm256_hadd_epi32( difRyz, difGyz );
  675. __m256i difByzxz = _mm256_hadd_epi32( difByz, difBxz );
  676. __m256i difRGxz = _mm256_hadd_epi32( difRxz, difGxz );
  677. __m128i sumRGyz = _mm_add_epi32( _mm256_castsi256_si128( difRGyz ), _mm256_extracti128_si256( difRGyz, 1 ) );
  678. __m128i sumByzxz = _mm_add_epi32( _mm256_castsi256_si128( difByzxz ), _mm256_extracti128_si256( difByzxz, 1 ) );
  679. __m128i sumRGxz = _mm_add_epi32( _mm256_castsi256_si128( difRGxz ), _mm256_extracti128_si256( difRGxz, 1 ) );
  680. __m128i sumRGByz = _mm_hadd_epi32( sumRGyz, sumByzxz );
  681. __m128i sumRGByzxz = _mm_hadd_epi32( sumRGxz, sumByzxz );
  682. __m128i sumRGBxz = _mm_shuffle_epi32( sumRGByzxz, _MM_SHUFFLE( 2, 3, 1, 0 ) );
  683. __m128 sumRGByzf = _mm_cvtepi32_ps( sumRGByz );
  684. __m128 sumRGBxzf = _mm_cvtepi32_ps( sumRGBxz );
  685. const float value = ( 255 * 255 * 8.0f + 85 * 85 * 8.0f ) * 16.0f;
  686. __m128 scale = _mm_set1_ps( -4.0f / value );
  687. __m128 af = _mm_mul_ps( sumRGBxzf, scale );
  688. __m128 bf = _mm_mul_ps( sumRGByzf, scale );
  689. __m128 df = _mm_mul_ps( _mm_cvtepi32_ps( t5 ), _mm_set1_ps( 4.0f / 16.0f ) );
  690. // calculating the three colors RGBO, RGBH, and RGBV. RGB = df - af * x - bf * y;
  691. __m128 cof0 = _mm_fnmadd_ps( af, _mm_set1_ps( -255.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( -255.0f ), df ) );
  692. __m128 chf0 = _mm_fnmadd_ps( af, _mm_set1_ps( 425.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( -255.0f ), df ) );
  693. __m128 cvf0 = _mm_fnmadd_ps( af, _mm_set1_ps( -255.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( 425.0f ), df ) );
  694. // convert to r6g7b6
  695. __m128i cohv = r6g7b6_AVX2( cof0, chf0, cvf0 );
  696. uint64_t rgbho = _mm_extract_epi64( cohv, 0 );
  697. uint32_t rgbv0 = _mm_extract_epi32( cohv, 2 );
  698. // Error calculation
  699. uint64_t error = 0;
  700. if( !useHeuristics )
  701. {
  702. auto ro0 = ( rgbho >> 48 ) & 0x3F;
  703. auto go0 = ( rgbho >> 40 ) & 0x7F;
  704. auto bo0 = ( rgbho >> 32 ) & 0x3F;
  705. auto ro1 = ( ro0 >> 4 ) | ( ro0 << 2 );
  706. auto go1 = ( go0 >> 6 ) | ( go0 << 1 );
  707. auto bo1 = ( bo0 >> 4 ) | ( bo0 << 2 );
  708. auto ro2 = ( ro1 << 2 ) + 2;
  709. auto go2 = ( go1 << 2 ) + 2;
  710. auto bo2 = ( bo1 << 2 ) + 2;
  711. __m256i ro3 = _mm256_set1_epi16( ro2 );
  712. __m256i go3 = _mm256_set1_epi16( go2 );
  713. __m256i bo3 = _mm256_set1_epi16( bo2 );
  714. auto rh0 = ( rgbho >> 16 ) & 0x3F;
  715. auto gh0 = ( rgbho >> 8 ) & 0x7F;
  716. auto bh0 = ( rgbho >> 0 ) & 0x3F;
  717. auto rh1 = ( rh0 >> 4 ) | ( rh0 << 2 );
  718. auto gh1 = ( gh0 >> 6 ) | ( gh0 << 1 );
  719. auto bh1 = ( bh0 >> 4 ) | ( bh0 << 2 );
  720. auto rh2 = rh1 - ro1;
  721. auto gh2 = gh1 - go1;
  722. auto bh2 = bh1 - bo1;
  723. __m256i rh3 = _mm256_set1_epi16( rh2 );
  724. __m256i gh3 = _mm256_set1_epi16( gh2 );
  725. __m256i bh3 = _mm256_set1_epi16( bh2 );
  726. auto rv0 = ( rgbv0 >> 16 ) & 0x3F;
  727. auto gv0 = ( rgbv0 >> 8 ) & 0x7F;
  728. auto bv0 = ( rgbv0 >> 0 ) & 0x3F;
  729. auto rv1 = ( rv0 >> 4 ) | ( rv0 << 2 );
  730. auto gv1 = ( gv0 >> 6 ) | ( gv0 << 1 );
  731. auto bv1 = ( bv0 >> 4 ) | ( bv0 << 2 );
  732. auto rv2 = rv1 - ro1;
  733. auto gv2 = gv1 - go1;
  734. auto bv2 = bv1 - bo1;
  735. __m256i rv3 = _mm256_set1_epi16( rv2 );
  736. __m256i gv3 = _mm256_set1_epi16( gv2 );
  737. __m256i bv3 = _mm256_set1_epi16( bv2 );
  738. __m256i x = _mm256_set_epi16( 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 );
  739. __m256i rh4 = _mm256_mullo_epi16( rh3, x );
  740. __m256i gh4 = _mm256_mullo_epi16( gh3, x );
  741. __m256i bh4 = _mm256_mullo_epi16( bh3, x );
  742. __m256i y = _mm256_set_epi16( 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0 );
  743. __m256i rv4 = _mm256_mullo_epi16( rv3, y );
  744. __m256i gv4 = _mm256_mullo_epi16( gv3, y );
  745. __m256i bv4 = _mm256_mullo_epi16( bv3, y );
  746. __m256i rxy = _mm256_add_epi16( rh4, rv4 );
  747. __m256i gxy = _mm256_add_epi16( gh4, gv4 );
  748. __m256i bxy = _mm256_add_epi16( bh4, bv4 );
  749. __m256i rp0 = _mm256_add_epi16( rxy, ro3 );
  750. __m256i gp0 = _mm256_add_epi16( gxy, go3 );
  751. __m256i bp0 = _mm256_add_epi16( bxy, bo3 );
  752. __m256i rp1 = _mm256_srai_epi16( rp0, 2 );
  753. __m256i gp1 = _mm256_srai_epi16( gp0, 2 );
  754. __m256i bp1 = _mm256_srai_epi16( bp0, 2 );
  755. __m256i rp2 = _mm256_max_epi16( _mm256_min_epi16( rp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );
  756. __m256i gp2 = _mm256_max_epi16( _mm256_min_epi16( gp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );
  757. __m256i bp2 = _mm256_max_epi16( _mm256_min_epi16( bp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );
  758. __m256i rdif = _mm256_sub_epi16( r08, rp2 );
  759. __m256i gdif = _mm256_sub_epi16( g08, gp2 );
  760. __m256i bdif = _mm256_sub_epi16( b08, bp2 );
  761. __m256i rerr = _mm256_mullo_epi16( rdif, _mm256_set1_epi16( 38 ) );
  762. __m256i gerr = _mm256_mullo_epi16( gdif, _mm256_set1_epi16( 76 ) );
  763. __m256i berr = _mm256_mullo_epi16( bdif, _mm256_set1_epi16( 14 ) );
  764. __m256i sum0 = _mm256_add_epi16( rerr, gerr );
  765. __m256i sum1 = _mm256_add_epi16( sum0, berr );
  766. __m256i sum2 = _mm256_madd_epi16( sum1, sum1 );
  767. __m128i sum3 = _mm_add_epi32( _mm256_castsi256_si128( sum2 ), _mm256_extracti128_si256( sum2, 1 ) );
  768. uint32_t err0 = _mm_extract_epi32( sum3, 0 );
  769. uint32_t err1 = _mm_extract_epi32( sum3, 1 );
  770. uint32_t err2 = _mm_extract_epi32( sum3, 2 );
  771. uint32_t err3 = _mm_extract_epi32( sum3, 3 );
  772. error = err0 + err1 + err2 + err3;
  773. }
  774. /**/
  775. uint32_t rgbv = ( rgbv0 & 0x3F ) | ( ( rgbv0 >> 2 ) & 0x1FC0 ) | ( ( rgbv0 >> 3 ) & 0x7E000 );
  776. uint64_t rgbho0_ = ( rgbho & 0x3F0000003F ) | ( ( rgbho >> 2 ) & 0x1FC000001FC0 ) | ( ( rgbho >> 3 ) & 0x7E0000007E000 );
  777. uint64_t rgbho0 = ( rgbho0_ & 0x7FFFF ) | ( ( rgbho0_ >> 13 ) & 0x3FFFF80000 );
  778. uint32_t hi = rgbv | ((rgbho0 & 0x1FFF) << 19);
  779. rgbho0 >>= 13;
  780. uint32_t lo = ( rgbho0 & 0x1 ) | ( ( rgbho0 & 0x1FE ) << 1 ) | ( ( rgbho0 & 0x600 ) << 2 ) | ( ( rgbho0 & 0x3F800 ) << 5 ) | ( ( rgbho0 & 0x1FC0000 ) << 6 );
  781. uint32_t idx = ( ( rgbho >> 33 ) & 0xF ) | ( ( rgbho >> 41 ) & 0x10 ) | ( ( rgbho >> 48 ) & 0x20 );
  782. lo |= g_flags[idx];
  783. uint64_t result = static_cast<uint32_t>(_bswap(lo));
  784. result |= static_cast<uint64_t>(static_cast<uint32_t>(_bswap(hi))) << 32;
  785. Plane plane;
  786. plane.plane = result;
  787. if( useHeuristics )
  788. {
  789. plane.error = 0;
  790. mode = ModePlanar;
  791. }
  792. else
  793. {
  794. plane.error = error;
  795. }
  796. plane.sum4 = _mm256_permute4x64_epi64(srgb, _MM_SHUFFLE(2, 3, 0, 1));
  797. return plane;
  798. }
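// Picks the modifier table index with the lowest accumulated error for each half of the
// block, writes the two table indices and the per-pixel selectors into the block payload,
// and falls back to the caller-supplied block (value) when its error is lower or equal.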
  799. static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate, const uint64_t value, const uint32_t error) noexcept
  800. {
  801. size_t tidx[2];
  802. // Get index of minimum error (terr[0] and terr[1])
  803. __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
  804. __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);
  805. __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
  806. __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));
  807. __m256i errMin0 = _mm256_min_epu32(errLo, errHi);
  808. __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
  809. __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);
  810. __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
  811. __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);
  812. __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
  813. __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));
  814. __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
  815. __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);
  816. uint32_t mask0 = _mm256_movemask_epi8(errMask0);
  817. uint32_t mask1 = _mm256_movemask_epi8(errMask1);
  818. tidx[0] = _bit_scan_forward(mask0) >> 2;
  819. tidx[1] = _bit_scan_forward(mask1) >> 2;
  820. if ((terr[0][tidx[0]] + terr[1][tidx[1]]) >= error)
  821. {
  822. return value;
  823. }
  824. d |= tidx[0] << 26;
  825. d |= tidx[1] << 29;
  826. unsigned int t0 = tsel[tidx[0]];
  827. unsigned int t1 = tsel[tidx[1]];
  828. if (!rotate)
  829. {
  830. t0 &= 0xFF00FF00;
  831. t1 &= 0x00FF00FF;
  832. }
  833. else
  834. {
  835. t0 &= 0xCCCCCCCC;
  836. t1 &= 0x33333333;
  837. }
  838. // Flip selectors from sign bit
  839. unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;
  840. return d | static_cast<uint64_t>(_bswap(t2)) << 32;
  841. }
  842. #endif
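// Computes the average color of each 4x2/2x4 half of the 4x4 block: a[0]/a[1] cover the
// two horizontal halves, a[2]/a[3] the two vertical halves, each rounded from the sum of
// 8 pixels.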
  843. static etcpak_force_inline void Average( const uint8_t* data, v4i* a )
  844. {
  845. #ifdef __SSE4_1__
  846. __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
  847. __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
  848. __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
  849. __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
  850. __m128i d0l = _mm_unpacklo_epi8(d0, _mm_setzero_si128());
  851. __m128i d0h = _mm_unpackhi_epi8(d0, _mm_setzero_si128());
  852. __m128i d1l = _mm_unpacklo_epi8(d1, _mm_setzero_si128());
  853. __m128i d1h = _mm_unpackhi_epi8(d1, _mm_setzero_si128());
  854. __m128i d2l = _mm_unpacklo_epi8(d2, _mm_setzero_si128());
  855. __m128i d2h = _mm_unpackhi_epi8(d2, _mm_setzero_si128());
  856. __m128i d3l = _mm_unpacklo_epi8(d3, _mm_setzero_si128());
  857. __m128i d3h = _mm_unpackhi_epi8(d3, _mm_setzero_si128());
  858. __m128i sum0 = _mm_add_epi16(d0l, d1l);
  859. __m128i sum1 = _mm_add_epi16(d0h, d1h);
  860. __m128i sum2 = _mm_add_epi16(d2l, d3l);
  861. __m128i sum3 = _mm_add_epi16(d2h, d3h);
  862. __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
  863. __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
  864. __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
  865. __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
  866. __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
  867. __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
  868. __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
  869. __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
  870. __m128i b0 = _mm_add_epi32(sum0l, sum0h);
  871. __m128i b1 = _mm_add_epi32(sum1l, sum1h);
  872. __m128i b2 = _mm_add_epi32(sum2l, sum2h);
  873. __m128i b3 = _mm_add_epi32(sum3l, sum3h);
  874. __m128i a0 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b2, b3), _mm_set1_epi32(4)), 3);
  875. __m128i a1 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b1), _mm_set1_epi32(4)), 3);
  876. __m128i a2 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b1, b3), _mm_set1_epi32(4)), 3);
  877. __m128i a3 = _mm_srli_epi32(_mm_add_epi32(_mm_add_epi32(b0, b2), _mm_set1_epi32(4)), 3);
  878. _mm_storeu_si128((__m128i*)&a[0], _mm_packus_epi32(_mm_shuffle_epi32(a0, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a1, _MM_SHUFFLE(3, 0, 1, 2))));
  879. _mm_storeu_si128((__m128i*)&a[2], _mm_packus_epi32(_mm_shuffle_epi32(a2, _MM_SHUFFLE(3, 0, 1, 2)), _mm_shuffle_epi32(a3, _MM_SHUFFLE(3, 0, 1, 2))));
  880. #elif defined __ARM_NEON
  881. uint8x16x2_t t0 = vzipq_u8(vld1q_u8(data + 0), uint8x16_t());
  882. uint8x16x2_t t1 = vzipq_u8(vld1q_u8(data + 16), uint8x16_t());
  883. uint8x16x2_t t2 = vzipq_u8(vld1q_u8(data + 32), uint8x16_t());
  884. uint8x16x2_t t3 = vzipq_u8(vld1q_u8(data + 48), uint8x16_t());
  885. uint16x8x2_t d0 = { vreinterpretq_u16_u8(t0.val[0]), vreinterpretq_u16_u8(t0.val[1]) };
  886. uint16x8x2_t d1 = { vreinterpretq_u16_u8(t1.val[0]), vreinterpretq_u16_u8(t1.val[1]) };
  887. uint16x8x2_t d2 = { vreinterpretq_u16_u8(t2.val[0]), vreinterpretq_u16_u8(t2.val[1]) };
  888. uint16x8x2_t d3 = { vreinterpretq_u16_u8(t3.val[0]), vreinterpretq_u16_u8(t3.val[1]) };
  889. uint16x8x2_t s0 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[0] ), vreinterpretq_s16_u16( d1.val[0] ) ) ), uint16x8_t());
  890. uint16x8x2_t s1 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[1] ), vreinterpretq_s16_u16( d1.val[1] ) ) ), uint16x8_t());
  891. uint16x8x2_t s2 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[0] ), vreinterpretq_s16_u16( d3.val[0] ) ) ), uint16x8_t());
  892. uint16x8x2_t s3 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[1] ), vreinterpretq_s16_u16( d3.val[1] ) ) ), uint16x8_t());
  893. uint32x4x2_t sum0 = { vreinterpretq_u32_u16(s0.val[0]), vreinterpretq_u32_u16(s0.val[1]) };
  894. uint32x4x2_t sum1 = { vreinterpretq_u32_u16(s1.val[0]), vreinterpretq_u32_u16(s1.val[1]) };
  895. uint32x4x2_t sum2 = { vreinterpretq_u32_u16(s2.val[0]), vreinterpretq_u32_u16(s2.val[1]) };
  896. uint32x4x2_t sum3 = { vreinterpretq_u32_u16(s3.val[0]), vreinterpretq_u32_u16(s3.val[1]) };
  897. uint32x4_t b0 = vaddq_u32(sum0.val[0], sum0.val[1]);
  898. uint32x4_t b1 = vaddq_u32(sum1.val[0], sum1.val[1]);
  899. uint32x4_t b2 = vaddq_u32(sum2.val[0], sum2.val[1]);
  900. uint32x4_t b3 = vaddq_u32(sum3.val[0], sum3.val[1]);
  901. uint32x4_t a0 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b2, b3), vdupq_n_u32(4)), 3);
  902. uint32x4_t a1 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b0, b1), vdupq_n_u32(4)), 3);
  903. uint32x4_t a2 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b1, b3), vdupq_n_u32(4)), 3);
  904. uint32x4_t a3 = vshrq_n_u32(vqaddq_u32(vqaddq_u32(b0, b2), vdupq_n_u32(4)), 3);
  905. uint16x8_t o0 = vcombine_u16(vqmovun_s32(vreinterpretq_s32_u32( a0 )), vqmovun_s32(vreinterpretq_s32_u32( a1 )));
  906. uint16x8_t o1 = vcombine_u16(vqmovun_s32(vreinterpretq_s32_u32( a2 )), vqmovun_s32(vreinterpretq_s32_u32( a3 )));
  907. a[0] = v4i{o0[2], o0[1], o0[0], 0};
  908. a[1] = v4i{o0[6], o0[5], o0[4], 0};
  909. a[2] = v4i{o1[2], o1[1], o1[0], 0};
  910. a[3] = v4i{o1[6], o1[5], o1[4], 0};
  911. #else
  912. uint32_t r[4];
  913. uint32_t g[4];
  914. uint32_t b[4];
  915. memset(r, 0, sizeof(r));
  916. memset(g, 0, sizeof(g));
  917. memset(b, 0, sizeof(b));
  918. for( int j=0; j<4; j++ )
  919. {
  920. for( int i=0; i<4; i++ )
  921. {
  922. int index = (j & 2) + (i >> 1);
  923. b[index] += *data++;
  924. g[index] += *data++;
  925. r[index] += *data++;
  926. data++;
  927. }
  928. }
  929. a[0] = v4i{ uint16_t( (r[2] + r[3] + 4) / 8 ), uint16_t( (g[2] + g[3] + 4) / 8 ), uint16_t( (b[2] + b[3] + 4) / 8 ), 0};
  930. a[1] = v4i{ uint16_t( (r[0] + r[1] + 4) / 8 ), uint16_t( (g[0] + g[1] + 4) / 8 ), uint16_t( (b[0] + b[1] + 4) / 8 ), 0};
  931. a[2] = v4i{ uint16_t( (r[1] + r[3] + 4) / 8 ), uint16_t( (g[1] + g[3] + 4) / 8 ), uint16_t( (b[1] + b[3] + 4) / 8 ), 0};
  932. a[3] = v4i{ uint16_t( (r[0] + r[2] + 4) / 8 ), uint16_t( (g[0] + g[2] + 4) / 8 ), uint16_t( (b[0] + b[2] + 4) / 8 ), 0};
  933. #endif
  934. }
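// Accumulates the per-channel color sums of the same four half-blocks as Average();
// the sums feed CalcError() below to estimate how well a candidate average fits each half.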
  935. static etcpak_force_inline void CalcErrorBlock( const uint8_t* data, unsigned int err[4][4] )
  936. {
  937. #ifdef __SSE4_1__
  938. __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
  939. __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
  940. __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
  941. __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);
  942. __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
  943. __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
  944. __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
  945. __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));
  946. __m128i d0l = _mm_unpacklo_epi8(dm0, _mm_setzero_si128());
  947. __m128i d0h = _mm_unpackhi_epi8(dm0, _mm_setzero_si128());
  948. __m128i d1l = _mm_unpacklo_epi8(dm1, _mm_setzero_si128());
  949. __m128i d1h = _mm_unpackhi_epi8(dm1, _mm_setzero_si128());
  950. __m128i d2l = _mm_unpacklo_epi8(dm2, _mm_setzero_si128());
  951. __m128i d2h = _mm_unpackhi_epi8(dm2, _mm_setzero_si128());
  952. __m128i d3l = _mm_unpacklo_epi8(dm3, _mm_setzero_si128());
  953. __m128i d3h = _mm_unpackhi_epi8(dm3, _mm_setzero_si128());
  954. __m128i sum0 = _mm_add_epi16(d0l, d1l);
  955. __m128i sum1 = _mm_add_epi16(d0h, d1h);
  956. __m128i sum2 = _mm_add_epi16(d2l, d3l);
  957. __m128i sum3 = _mm_add_epi16(d2h, d3h);
  958. __m128i sum0l = _mm_unpacklo_epi16(sum0, _mm_setzero_si128());
  959. __m128i sum0h = _mm_unpackhi_epi16(sum0, _mm_setzero_si128());
  960. __m128i sum1l = _mm_unpacklo_epi16(sum1, _mm_setzero_si128());
  961. __m128i sum1h = _mm_unpackhi_epi16(sum1, _mm_setzero_si128());
  962. __m128i sum2l = _mm_unpacklo_epi16(sum2, _mm_setzero_si128());
  963. __m128i sum2h = _mm_unpackhi_epi16(sum2, _mm_setzero_si128());
  964. __m128i sum3l = _mm_unpacklo_epi16(sum3, _mm_setzero_si128());
  965. __m128i sum3h = _mm_unpackhi_epi16(sum3, _mm_setzero_si128());
  966. __m128i b0 = _mm_add_epi32(sum0l, sum0h);
  967. __m128i b1 = _mm_add_epi32(sum1l, sum1h);
  968. __m128i b2 = _mm_add_epi32(sum2l, sum2h);
  969. __m128i b3 = _mm_add_epi32(sum3l, sum3h);
  970. __m128i a0 = _mm_add_epi32(b2, b3);
  971. __m128i a1 = _mm_add_epi32(b0, b1);
  972. __m128i a2 = _mm_add_epi32(b1, b3);
  973. __m128i a3 = _mm_add_epi32(b0, b2);
  974. _mm_storeu_si128((__m128i*)&err[0], a0);
  975. _mm_storeu_si128((__m128i*)&err[1], a1);
  976. _mm_storeu_si128((__m128i*)&err[2], a2);
  977. _mm_storeu_si128((__m128i*)&err[3], a3);
  978. #elif defined __ARM_NEON
  979. uint8x16x2_t t0 = vzipq_u8(vld1q_u8(data + 0), uint8x16_t());
  980. uint8x16x2_t t1 = vzipq_u8(vld1q_u8(data + 16), uint8x16_t());
  981. uint8x16x2_t t2 = vzipq_u8(vld1q_u8(data + 32), uint8x16_t());
  982. uint8x16x2_t t3 = vzipq_u8(vld1q_u8(data + 48), uint8x16_t());
  983. uint16x8x2_t d0 = { vreinterpretq_u16_u8(t0.val[0]), vreinterpretq_u16_u8(t0.val[1]) };
  984. uint16x8x2_t d1 = { vreinterpretq_u16_u8(t1.val[0]), vreinterpretq_u16_u8(t1.val[1]) };
  985. uint16x8x2_t d2 = { vreinterpretq_u16_u8(t2.val[0]), vreinterpretq_u16_u8(t2.val[1]) };
  986. uint16x8x2_t d3 = { vreinterpretq_u16_u8(t3.val[0]), vreinterpretq_u16_u8(t3.val[1]) };
  987. uint16x8x2_t s0 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[0] ), vreinterpretq_s16_u16( d1.val[0] ))), uint16x8_t());
  988. uint16x8x2_t s1 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d0.val[1] ), vreinterpretq_s16_u16( d1.val[1] ))), uint16x8_t());
  989. uint16x8x2_t s2 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[0] ), vreinterpretq_s16_u16( d3.val[0] ))), uint16x8_t());
  990. uint16x8x2_t s3 = vzipq_u16(vreinterpretq_u16_s16( vaddq_s16(vreinterpretq_s16_u16( d2.val[1] ), vreinterpretq_s16_u16( d3.val[1] ))), uint16x8_t());
  991. uint32x4x2_t sum0 = { vreinterpretq_u32_u16(s0.val[0]), vreinterpretq_u32_u16(s0.val[1]) };
  992. uint32x4x2_t sum1 = { vreinterpretq_u32_u16(s1.val[0]), vreinterpretq_u32_u16(s1.val[1]) };
  993. uint32x4x2_t sum2 = { vreinterpretq_u32_u16(s2.val[0]), vreinterpretq_u32_u16(s2.val[1]) };
  994. uint32x4x2_t sum3 = { vreinterpretq_u32_u16(s3.val[0]), vreinterpretq_u32_u16(s3.val[1]) };
  995. uint32x4_t b0 = vaddq_u32(sum0.val[0], sum0.val[1]);
  996. uint32x4_t b1 = vaddq_u32(sum1.val[0], sum1.val[1]);
  997. uint32x4_t b2 = vaddq_u32(sum2.val[0], sum2.val[1]);
  998. uint32x4_t b3 = vaddq_u32(sum3.val[0], sum3.val[1]);
  999. uint32x4_t a0 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b2, b3) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1000. uint32x4_t a1 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b0, b1) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1001. uint32x4_t a2 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b1, b3) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1002. uint32x4_t a3 = vreinterpretq_u32_u8( vandq_u8(vreinterpretq_u8_u32( vqaddq_u32(b0, b2) ), vreinterpretq_u8_u32( vdupq_n_u32(0x00FFFFFF)) ) );
  1003. vst1q_u32(err[0], a0);
  1004. vst1q_u32(err[1], a1);
  1005. vst1q_u32(err[2], a2);
  1006. vst1q_u32(err[3], a3);
  1007. #else
  1008. unsigned int terr[4][4];
  1009. memset(terr, 0, 16 * sizeof(unsigned int));
  1010. for( int j=0; j<4; j++ )
  1011. {
  1012. for( int i=0; i<4; i++ )
  1013. {
  1014. int index = (j & 2) + (i >> 1);
  1015. unsigned int d = *data++;
  1016. terr[index][0] += d;
  1017. d = *data++;
  1018. terr[index][1] += d;
  1019. d = *data++;
  1020. terr[index][2] += d;
  1021. data++;
  1022. }
  1023. }
  1024. for( int i=0; i<3; i++ )
  1025. {
  1026. err[0][i] = terr[2][i] + terr[3][i];
  1027. err[1][i] = terr[0][i] + terr[1][i];
  1028. err[2][i] = terr[1][i] + terr[3][i];
  1029. err[3][i] = terr[0][i] + terr[2][i];
  1030. }
  1031. for( int i=0; i<4; i++ )
  1032. {
  1033. err[i][3] = 0;
  1034. }
  1035. #endif
  1036. }
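// Estimates the squared error between a half-block and a candidate average. Expanding
// sum((p - a)^2) over the 8 pixels of a half gives sum(p^2) - 2*a*sum(p) + 8*a^2 per channel;
// the sum(p^2) term is the same for every candidate, so it is replaced by a large constant
// to keep the value non-negative while preserving the ordering.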
  1037. static etcpak_force_inline unsigned int CalcError( const unsigned int block[4], const v4i& average )
  1038. {
  1039. unsigned int err = 0x3FFFFFFF; // Big value to prevent negative values, but small enough to prevent overflow
  1040. err -= block[0] * 2 * average[2];
  1041. err -= block[1] * 2 * average[1];
  1042. err -= block[2] * 2 * average[0];
  1043. err += 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
  1044. return err;
  1045. }
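// Quantizes the half-block averages: a[4..7] receive the pairs quantized to 5 bits per
// channel, with one color of each pair clamped so the pair fits the signed 3-bit delta of
// ETC1 differential mode; a[0..3] are requantized to 4 bits per channel for individual
// mode. Both sets are expanded back to 8 bits per channel.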
  1046. static etcpak_force_inline void ProcessAverages( v4i* a )
  1047. {
  1048. #ifdef __SSE4_1__
  1049. for( int i=0; i<2; i++ )
  1050. {
  1051. __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
  1052. __m128i t = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(31)), _mm_set1_epi16(128));
  1053. __m128i c = _mm_srli_epi16(_mm_add_epi16(t, _mm_srli_epi16(t, 8)), 8);
  1054. __m128i c1 = _mm_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
  1055. __m128i diff = _mm_sub_epi16(c, c1);
  1056. diff = _mm_max_epi16(diff, _mm_set1_epi16(-4));
  1057. diff = _mm_min_epi16(diff, _mm_set1_epi16(3));
  1058. __m128i co = _mm_add_epi16(c1, diff);
  1059. c = _mm_blend_epi16(co, c, 0xF0);
  1060. __m128i a0 = _mm_or_si128(_mm_slli_epi16(c, 3), _mm_srli_epi16(c, 2));
  1061. _mm_storeu_si128((__m128i*)a[4+i*2].data(), a0);
  1062. }
  1063. for( int i=0; i<2; i++ )
  1064. {
  1065. __m128i d = _mm_loadu_si128((__m128i*)a[i*2].data());
  1066. __m128i t0 = _mm_add_epi16(_mm_mullo_epi16(d, _mm_set1_epi16(15)), _mm_set1_epi16(128));
  1067. __m128i t1 = _mm_srli_epi16(_mm_add_epi16(t0, _mm_srli_epi16(t0, 8)), 8);
  1068. __m128i t2 = _mm_or_si128(t1, _mm_slli_epi16(t1, 4));
  1069. _mm_storeu_si128((__m128i*)a[i*2].data(), t2);
  1070. }
  1071. #elif defined __ARM_NEON
  1072. for( int i=0; i<2; i++ )
  1073. {
  1074. int16x8_t d = vld1q_s16((int16_t*)&a[i*2]);
  1075. int16x8_t t = vaddq_s16(vmulq_s16(d, vdupq_n_s16(31)), vdupq_n_s16(128));
  1076. int16x8_t c = vshrq_n_s16(vaddq_s16(t, vshrq_n_s16(t, 8)), 8);
  1077. int16x8_t c1 = vcombine_s16(vget_high_s16(c), vget_high_s16(c));
  1078. int16x8_t diff = vsubq_s16(c, c1);
  1079. diff = vmaxq_s16(diff, vdupq_n_s16(-4));
  1080. diff = vminq_s16(diff, vdupq_n_s16(3));
  1081. int16x8_t co = vaddq_s16(c1, diff);
  1082. c = vcombine_s16(vget_low_s16(co), vget_high_s16(c));
  1083. int16x8_t a0 = vorrq_s16(vshlq_n_s16(c, 3), vshrq_n_s16(c, 2));
  1084. vst1q_s16((int16_t*)&a[4+i*2], a0);
  1085. }
  1086. for( int i=0; i<2; i++ )
  1087. {
  1088. int16x8_t d = vld1q_s16((int16_t*)&a[i*2]);
  1089. int16x8_t t0 = vaddq_s16(vmulq_s16(d, vdupq_n_s16(15)), vdupq_n_s16(128));
  1090. int16x8_t t1 = vshrq_n_s16(vaddq_s16(t0, vshrq_n_s16(t0, 8)), 8);
  1091. int16x8_t t2 = vorrq_s16(t1, vshlq_n_s16(t1, 4));
  1092. vst1q_s16((int16_t*)&a[i*2], t2);
  1093. }
  1094. #else
  1095. for( int i=0; i<2; i++ )
  1096. {
  1097. for( int j=0; j<3; j++ )
  1098. {
  1099. int32_t c1 = mul8bit( a[i*2+1][j], 31 );
  1100. int32_t c2 = mul8bit( a[i*2][j], 31 );
  1101. int32_t diff = c2 - c1;
  1102. if( diff > 3 ) diff = 3;
  1103. else if( diff < -4 ) diff = -4;
  1104. int32_t co = c1 + diff;
  1105. a[5+i*2][j] = ( c1 << 3 ) | ( c1 >> 2 );
  1106. a[4+i*2][j] = ( co << 3 ) | ( co >> 2 );
  1107. }
  1108. }
  1109. for( int i=0; i<4; i++ )
  1110. {
  1111. a[i][0] = g_avg2[mul8bit( a[i][0], 15 )];
  1112. a[i][1] = g_avg2[mul8bit( a[i][1], 15 )];
  1113. a[i][2] = g_avg2[mul8bit( a[i][2], 15 )];
  1114. }
  1115. #endif
  1116. }
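// Packs the selected pair of quantized averages into the high 32 bits of the ETC1 block:
// two 4-bit colors per channel for individual mode, or a 5-bit base plus 3-bit delta for
// differential mode, as selected by idx.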
  1117. static etcpak_force_inline void EncodeAverages( uint64_t& _d, const v4i* a, size_t idx )
  1118. {
  1119. auto d = _d;
  1120. d |= ( idx << 24 );
  1121. size_t base = idx << 1;
  1122. if( ( idx & 0x2 ) == 0 )
  1123. {
  1124. for( int i=0; i<3; i++ )
  1125. {
  1126. d |= uint64_t( a[base+0][i] >> 4 ) << ( i*8 );
  1127. d |= uint64_t( a[base+1][i] >> 4 ) << ( i*8 + 4 );
  1128. }
  1129. }
  1130. else
  1131. {
  1132. for( int i=0; i<3; i++ )
  1133. {
  1134. d |= uint64_t( a[base+1][i] & 0xF8 ) << ( i*8 );
  1135. int32_t c = ( ( a[base+0][i] & 0xF8 ) - ( a[base+1][i] & 0xF8 ) ) >> 3;
  1136. c &= ~0xFFFFFFF8;
  1137. d |= ((uint64_t)c) << ( i*8 );
  1138. }
  1139. }
  1140. _d = d;
  1141. }
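// Fast path for uniform blocks: if all 16 pixels are identical, returns a precomputed
// block encoding that single color; otherwise returns 0.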
  1142. static etcpak_force_inline uint64_t CheckSolid( const uint8_t* src )
  1143. {
  1144. #ifdef __SSE4_1__
  1145. __m128i d0 = _mm_loadu_si128(((__m128i*)src) + 0);
  1146. __m128i d1 = _mm_loadu_si128(((__m128i*)src) + 1);
  1147. __m128i d2 = _mm_loadu_si128(((__m128i*)src) + 2);
  1148. __m128i d3 = _mm_loadu_si128(((__m128i*)src) + 3);
  1149. __m128i c = _mm_shuffle_epi32(d0, _MM_SHUFFLE(0, 0, 0, 0));
  1150. __m128i c0 = _mm_cmpeq_epi8(d0, c);
  1151. __m128i c1 = _mm_cmpeq_epi8(d1, c);
  1152. __m128i c2 = _mm_cmpeq_epi8(d2, c);
  1153. __m128i c3 = _mm_cmpeq_epi8(d3, c);
  1154. __m128i m0 = _mm_and_si128(c0, c1);
  1155. __m128i m1 = _mm_and_si128(c2, c3);
  1156. __m128i m = _mm_and_si128(m0, m1);
  1157. if (!_mm_testc_si128(m, _mm_set1_epi32(-1)))
  1158. {
  1159. return 0;
  1160. }
  1161. #elif defined __ARM_NEON
  1162. int32x4_t d0 = vld1q_s32((int32_t*)src + 0);
  1163. int32x4_t d1 = vld1q_s32((int32_t*)src + 4);
  1164. int32x4_t d2 = vld1q_s32((int32_t*)src + 8);
  1165. int32x4_t d3 = vld1q_s32((int32_t*)src + 12);
  1166. int32x4_t c = vdupq_n_s32(d0[0]);
  1167. int32x4_t c0 = vreinterpretq_s32_u32(vceqq_s32(d0, c));
  1168. int32x4_t c1 = vreinterpretq_s32_u32(vceqq_s32(d1, c));
  1169. int32x4_t c2 = vreinterpretq_s32_u32(vceqq_s32(d2, c));
  1170. int32x4_t c3 = vreinterpretq_s32_u32(vceqq_s32(d3, c));
  1171. int32x4_t m0 = vandq_s32(c0, c1);
  1172. int32x4_t m1 = vandq_s32(c2, c3);
  1173. int64x2_t m = vreinterpretq_s64_s32(vandq_s32(m0, m1));
  1174. if (m[0] != -1 || m[1] != -1)
  1175. {
  1176. return 0;
  1177. }
  1178. #else
  1179. const uint8_t* ptr = src + 4;
  1180. for( int i=1; i<16; i++ )
  1181. {
  1182. if( memcmp( src, ptr, 4 ) != 0 )
  1183. {
  1184. return 0;
  1185. }
  1186. ptr += 4;
  1187. }
  1188. #endif
  1189. return 0x02000000 |
  1190. ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
  1191. ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
  1192. ( (unsigned int)( src[2] & 0xF8 ) );
  1193. }
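// Computes the averages and accumulates the error estimates for the four encoding
// candidates of a block: err[0]/err[1] for the two split orientations with individual-mode
// colors, err[2]/err[3] for the same splits with differential-mode colors.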
  1194. static etcpak_force_inline void PrepareAverages( v4i a[8], const uint8_t* src, unsigned int err[4] )
  1195. {
  1196. Average( src, a );
  1197. ProcessAverages( a );
  1198. unsigned int errblock[4][4];
  1199. CalcErrorBlock( src, errblock );
  1200. for( int i=0; i<4; i++ )
  1201. {
  1202. err[i/2] += CalcError( errblock[i], a[i] );
  1203. err[2+i/2] += CalcError( errblock[i], a[i+4] );
  1204. }
  1205. }
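// Reference selector search: for every pixel, tries all 8 ETC1 modifier tables, picks the
// modifier that best compensates the pixel's luma-weighted deviation from its half-block
// average, stores the selector in tsel and accumulates the squared error per table in terr.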
  1206. static etcpak_force_inline void FindBestFit( uint64_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
  1207. {
  1208. for( size_t i=0; i<16; i++ )
  1209. {
  1210. uint16_t* sel = tsel[i];
  1211. unsigned int bid = id[i];
  1212. uint64_t* ter = terr[bid%2];
  1213. uint8_t b = *data++;
  1214. uint8_t g = *data++;
  1215. uint8_t r = *data++;
  1216. data++;
  1217. int dr = a[bid][0] - r;
  1218. int dg = a[bid][1] - g;
  1219. int db = a[bid][2] - b;
  1220. #ifdef __SSE4_1__
  1221. // Reference implementation
  1222. __m128i pix = _mm_set1_epi32(dr * 77 + dg * 151 + db * 28);
  1223. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1224. __m128i error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[0]));
  1225. __m128i error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[1]));
  1226. __m128i error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[0]));
  1227. __m128i error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[1]));
  1228. __m128i index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
  1229. __m128i minError0 = _mm_min_epi32(error0, error1);
  1230. __m128i index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
  1231. __m128i minError1 = _mm_min_epi32(error2, error3);
  1232. __m128i minIndex0 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
  1233. __m128i minError = _mm_min_epi32(minError0, minError1);
  1234. // Squaring the minimum error to produce correct values when adding
  1235. __m128i minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
  1236. __m128i squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
  1237. squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
  1238. _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
  1239. __m128i minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
  1240. __m128i squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
  1241. squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
  1242. _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
  1243. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1244. error0 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[2]));
  1245. error1 = _mm_abs_epi32(_mm_add_epi32(pix, g_table256_SIMD[3]));
  1246. error2 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[2]));
  1247. error3 = _mm_abs_epi32(_mm_sub_epi32(pix, g_table256_SIMD[3]));
  1248. index0 = _mm_and_si128(_mm_cmplt_epi32(error1, error0), _mm_set1_epi32(1));
  1249. minError0 = _mm_min_epi32(error0, error1);
  1250. index1 = _mm_sub_epi32(_mm_set1_epi32(2), _mm_cmplt_epi32(error3, error2));
  1251. minError1 = _mm_min_epi32(error2, error3);
  1252. __m128i minIndex1 = _mm_blendv_epi8(index0, index1, _mm_cmplt_epi32(minError1, minError0));
  1253. minError = _mm_min_epi32(minError0, minError1);
  1254. // Squaring the minimum error to produce correct values when adding
  1255. minErrorLow = _mm_shuffle_epi32(minError, _MM_SHUFFLE(1, 1, 0, 0));
  1256. squareErrorLow = _mm_mul_epi32(minErrorLow, minErrorLow);
  1257. squareErrorLow = _mm_add_epi64(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 2));
  1258. _mm_storeu_si128(((__m128i*)ter) + 2, squareErrorLow);
  1259. minErrorHigh = _mm_shuffle_epi32(minError, _MM_SHUFFLE(3, 3, 2, 2));
  1260. squareErrorHigh = _mm_mul_epi32(minErrorHigh, minErrorHigh);
  1261. squareErrorHigh = _mm_add_epi64(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 3));
  1262. _mm_storeu_si128(((__m128i*)ter) + 3, squareErrorHigh);
  1263. __m128i minIndex = _mm_packs_epi32(minIndex0, minIndex1);
  1264. _mm_storeu_si128((__m128i*)sel, minIndex);
  1265. #elif defined __ARM_NEON
  1266. int32x4_t pix = vdupq_n_s32(dr * 77 + dg * 151 + db * 28);
  1267. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1268. uint32x4_t error0 = vreinterpretq_u32_s32(vabsq_s32(vaddq_s32(pix, g_table256_NEON[0])));
  1269. uint32x4_t error1 = vreinterpretq_u32_s32(vabsq_s32(vaddq_s32(pix, g_table256_NEON[1])));
  1270. uint32x4_t error2 = vreinterpretq_u32_s32(vabsq_s32(vsubq_s32(pix, g_table256_NEON[0])));
  1271. uint32x4_t error3 = vreinterpretq_u32_s32(vabsq_s32(vsubq_s32(pix, g_table256_NEON[1])));
  1272. uint32x4_t index0 = vandq_u32(vcltq_u32(error1, error0), vdupq_n_u32(1));
  1273. uint32x4_t minError0 = vminq_u32(error0, error1);
  1274. uint32x4_t index1 = vreinterpretq_u32_s32(vsubq_s32(vdupq_n_s32(2), vreinterpretq_s32_u32(vcltq_u32(error3, error2))));
  1275. uint32x4_t minError1 = vminq_u32(error2, error3);
  1276. uint32x4_t blendMask = vcltq_u32(minError1, minError0);
  1277. uint32x4_t minIndex0 = vorrq_u32(vbicq_u32(index0, blendMask), vandq_u32(index1, blendMask));
  1278. uint32x4_t minError = vminq_u32(minError0, minError1);
  1279. // Squaring the minimum error to produce correct values when adding
  1280. uint32x4_t squareErrorLow = vmulq_u32(minError, minError);
  1281. uint32x4_t squareErrorHigh = vshrq_n_u32(vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32(minError), vreinterpretq_s32_u32(minError))), 1);
  1282. uint32x4x2_t squareErrorZip = vzipq_u32(squareErrorLow, squareErrorHigh);
  1283. uint64x2x2_t squareError = { vreinterpretq_u64_u32(squareErrorZip.val[0]), vreinterpretq_u64_u32(squareErrorZip.val[1]) };
  1284. squareError.val[0] = vaddq_u64(squareError.val[0], vld1q_u64(ter + 0));
  1285. squareError.val[1] = vaddq_u64(squareError.val[1], vld1q_u64(ter + 2));
  1286. vst1q_u64(ter + 0, squareError.val[0]);
  1287. vst1q_u64(ter + 2, squareError.val[1]);
  1288. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1289. error0 = vreinterpretq_u32_s32( vabsq_s32(vaddq_s32(pix, g_table256_NEON[2])));
  1290. error1 = vreinterpretq_u32_s32( vabsq_s32(vaddq_s32(pix, g_table256_NEON[3])));
  1291. error2 = vreinterpretq_u32_s32( vabsq_s32(vsubq_s32(pix, g_table256_NEON[2])));
  1292. error3 = vreinterpretq_u32_s32( vabsq_s32(vsubq_s32(pix, g_table256_NEON[3])));
  1293. index0 = vandq_u32(vcltq_u32(error1, error0), vdupq_n_u32(1));
  1294. minError0 = vminq_u32(error0, error1);
  1295. index1 = vreinterpretq_u32_s32( vsubq_s32(vdupq_n_s32(2), vreinterpretq_s32_u32(vcltq_u32(error3, error2))) );
  1296. minError1 = vminq_u32(error2, error3);
  1297. blendMask = vcltq_u32(minError1, minError0);
  1298. uint32x4_t minIndex1 = vorrq_u32(vbicq_u32(index0, blendMask), vandq_u32(index1, blendMask));
  1299. minError = vminq_u32(minError0, minError1);
  1300. // Squaring the minimum error to produce correct values when adding
  1301. squareErrorLow = vmulq_u32(minError, minError);
  1302. squareErrorHigh = vshrq_n_u32(vreinterpretq_u32_s32( vqdmulhq_s32(vreinterpretq_s32_u32(minError), vreinterpretq_s32_u32(minError)) ), 1 );
  1303. squareErrorZip = vzipq_u32(squareErrorLow, squareErrorHigh);
  1304. squareError.val[0] = vaddq_u64(vreinterpretq_u64_u32( squareErrorZip.val[0] ), vld1q_u64(ter + 4));
  1305. squareError.val[1] = vaddq_u64(vreinterpretq_u64_u32( squareErrorZip.val[1] ), vld1q_u64(ter + 6));
  1306. vst1q_u64(ter + 4, squareError.val[0]);
  1307. vst1q_u64(ter + 6, squareError.val[1]);
  1308. uint16x8_t minIndex = vcombine_u16(vqmovn_u32(minIndex0), vqmovn_u32(minIndex1));
  1309. vst1q_u16(sel, minIndex);
  1310. #else
  1311. int pix = dr * 77 + dg * 151 + db * 28;
  1312. for( int t=0; t<8; t++ )
  1313. {
  1314. const int64_t* tab = g_table256[t];
  1315. unsigned int idx = 0;
  1316. uint64_t err = sq( tab[0] + pix );
  1317. for( int j=1; j<4; j++ )
  1318. {
  1319. uint64_t local = sq( tab[j] + pix );
  1320. if( local < err )
  1321. {
  1322. err = local;
  1323. idx = j;
  1324. }
  1325. }
  1326. *sel++ = idx;
  1327. *ter++ += err;
  1328. }
  1329. #endif
  1330. }
  1331. }
  1332. #if defined __SSE4_1__ || defined __ARM_NEON
1333. // Non-reference implementation, but faster. Produces the same results as the AVX2 version.
  1334. static etcpak_force_inline void FindBestFit( uint32_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
  1335. {
  1336. for( size_t i=0; i<16; i++ )
  1337. {
  1338. uint16_t* sel = tsel[i];
  1339. unsigned int bid = id[i];
  1340. uint32_t* ter = terr[bid%2];
  1341. uint8_t b = *data++;
  1342. uint8_t g = *data++;
  1343. uint8_t r = *data++;
  1344. data++;
  1345. int dr = a[bid][0] - r;
  1346. int dg = a[bid][1] - g;
  1347. int db = a[bid][2] - b;
  1348. #ifdef __SSE4_1__
1349. // The scaling values are divided by two and rounded to keep the differences within the range of a signed int16.
1350. // This produces slightly different results, but is significantly faster.
  1351. __m128i pixel = _mm_set1_epi16(dr * 38 + dg * 76 + db * 14);
  1352. __m128i pix = _mm_abs_epi16(pixel);
  1353. // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
  1354. // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
  1355. __m128i error0 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[0]));
  1356. __m128i error1 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[1]));
  1357. __m128i index = _mm_and_si128(_mm_cmplt_epi16(error1, error0), _mm_set1_epi16(1));
  1358. __m128i minError = _mm_min_epi16(error0, error1);
1359. // Exploit the symmetry of the selector table and use the sign bit.
1360. // This produces slightly different results, but is needed to match the AVX2 implementation.
  1361. __m128i indexBit = _mm_andnot_si128(_mm_srli_epi16(pixel, 15), _mm_set1_epi8(-1));
  1362. __m128i minIndex = _mm_or_si128(index, _mm_add_epi16(indexBit, indexBit));
  1363. // Squaring the minimum error to produce correct values when adding
  1364. __m128i squareErrorLo = _mm_mullo_epi16(minError, minError);
  1365. __m128i squareErrorHi = _mm_mulhi_epi16(minError, minError);
  1366. __m128i squareErrorLow = _mm_unpacklo_epi16(squareErrorLo, squareErrorHi);
  1367. __m128i squareErrorHigh = _mm_unpackhi_epi16(squareErrorLo, squareErrorHi);
  1368. squareErrorLow = _mm_add_epi32(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
  1369. _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
  1370. squareErrorHigh = _mm_add_epi32(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
  1371. _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);
  1372. _mm_storeu_si128((__m128i*)sel, minIndex);
  1373. #elif defined __ARM_NEON
  1374. int16x8_t pixel = vdupq_n_s16( dr * 38 + dg * 76 + db * 14 );
  1375. int16x8_t pix = vabsq_s16( pixel );
  1376. int16x8_t error0 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[0] ) );
  1377. int16x8_t error1 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[1] ) );
  1378. int16x8_t index = vandq_s16( vreinterpretq_s16_u16( vcltq_s16( error1, error0 ) ), vdupq_n_s16( 1 ) );
  1379. int16x8_t minError = vminq_s16( error0, error1 );
  1380. int16x8_t indexBit = vandq_s16( vmvnq_s16( vshrq_n_s16( pixel, 15 ) ), vdupq_n_s16( -1 ) );
  1381. int16x8_t minIndex = vorrq_s16( index, vaddq_s16( indexBit, indexBit ) );
  1382. int16x4_t minErrorLow = vget_low_s16( minError );
  1383. int16x4_t minErrorHigh = vget_high_s16( minError );
  1384. int32x4_t squareErrorLow = vmull_s16( minErrorLow, minErrorLow );
  1385. int32x4_t squareErrorHigh = vmull_s16( minErrorHigh, minErrorHigh );
  1386. int32x4_t squareErrorSumLow = vaddq_s32( squareErrorLow, vld1q_s32( (int32_t*)ter ) );
  1387. int32x4_t squareErrorSumHigh = vaddq_s32( squareErrorHigh, vld1q_s32( (int32_t*)ter + 4 ) );
  1388. vst1q_s32( (int32_t*)ter, squareErrorSumLow );
  1389. vst1q_s32( (int32_t*)ter + 4, squareErrorSumHigh );
  1390. vst1q_s16( (int16_t*)sel, minIndex );
  1391. #endif
  1392. }
  1393. }
  1394. #endif
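// Quantize a fitted planar color component (roughly four times the 8-bit value, clamped to
// 0..1023) to 6 or 7 bits; the add/shift corrections approximate round-to-nearest without
// a divide.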
  1395. static etcpak_force_inline uint8_t convert6(float f)
  1396. {
  1397. int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
  1398. return (i + 11 - ((i + 11) >> 7) - ((i + 4) >> 7)) >> 3;
  1399. }
  1400. static etcpak_force_inline uint8_t convert7(float f)
  1401. {
  1402. int i = (std::min(std::max(static_cast<int>(f), 0), 1023) - 15) >> 1;
  1403. return (i + 9 - ((i + 9) >> 8) - ((i + 6) >> 8)) >> 2;
  1404. }
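// ETC2 planar mode: fits RGB as a linear function of the pixel position by least squares,
// quantizes the three control colors (origin, horizontal, vertical) to R6G7B6, measures the
// luma-weighted squared error of the fit when heuristics are enabled, and packs the 64-bit
// planar block.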
  1405. static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar( const uint8_t* src, const uint8_t mode, bool useHeuristics )
  1406. {
  1407. int32_t r = 0;
  1408. int32_t g = 0;
  1409. int32_t b = 0;
  1410. for( int i = 0; i < 16; ++i )
  1411. {
  1412. b += src[i * 4 + 0];
  1413. g += src[i * 4 + 1];
  1414. r += src[i * 4 + 2];
  1415. }
  1416. int32_t difRyz = 0;
  1417. int32_t difGyz = 0;
  1418. int32_t difByz = 0;
  1419. int32_t difRxz = 0;
  1420. int32_t difGxz = 0;
  1421. int32_t difBxz = 0;
  1422. const int32_t scaling[] = { -255, -85, 85, 255 };
  1423. for (int i = 0; i < 16; ++i)
  1424. {
  1425. int32_t difB = (static_cast<int>(src[i * 4 + 0]) << 4) - b;
  1426. int32_t difG = (static_cast<int>(src[i * 4 + 1]) << 4) - g;
  1427. int32_t difR = (static_cast<int>(src[i * 4 + 2]) << 4) - r;
  1428. difRyz += difR * scaling[i % 4];
  1429. difGyz += difG * scaling[i % 4];
  1430. difByz += difB * scaling[i % 4];
  1431. difRxz += difR * scaling[i / 4];
  1432. difGxz += difG * scaling[i / 4];
  1433. difBxz += difB * scaling[i / 4];
  1434. }
  1435. const float scale = -4.0f / ((255 * 255 * 8.0f + 85 * 85 * 8.0f) * 16.0f);
  1436. float aR = difRxz * scale;
  1437. float aG = difGxz * scale;
  1438. float aB = difBxz * scale;
  1439. float bR = difRyz * scale;
  1440. float bG = difGyz * scale;
  1441. float bB = difByz * scale;
  1442. float dR = r * (4.0f / 16.0f);
  1443. float dG = g * (4.0f / 16.0f);
  1444. float dB = b * (4.0f / 16.0f);
  1445. // calculating the three colors RGBO, RGBH, and RGBV. RGB = df - af * x - bf * y;
  1446. float cofR = std::fma(aR, 255.0f, std::fma(bR, 255.0f, dR));
  1447. float cofG = std::fma(aG, 255.0f, std::fma(bG, 255.0f, dG));
  1448. float cofB = std::fma(aB, 255.0f, std::fma(bB, 255.0f, dB));
  1449. float chfR = std::fma(aR, -425.0f, std::fma(bR, 255.0f, dR));
  1450. float chfG = std::fma(aG, -425.0f, std::fma(bG, 255.0f, dG));
  1451. float chfB = std::fma(aB, -425.0f, std::fma(bB, 255.0f, dB));
  1452. float cvfR = std::fma(aR, 255.0f, std::fma(bR, -425.0f, dR));
  1453. float cvfG = std::fma(aG, 255.0f, std::fma(bG, -425.0f, dG));
  1454. float cvfB = std::fma(aB, 255.0f, std::fma(bB, -425.0f, dB));
  1455. // convert to r6g7b6
  1456. int32_t coR = convert6(cofR);
  1457. int32_t coG = convert7(cofG);
  1458. int32_t coB = convert6(cofB);
  1459. int32_t chR = convert6(chfR);
  1460. int32_t chG = convert7(chfG);
  1461. int32_t chB = convert6(chfB);
  1462. int32_t cvR = convert6(cvfR);
  1463. int32_t cvG = convert7(cvfG);
  1464. int32_t cvB = convert6(cvfB);
  1465. // Error calculation
  1466. uint64_t error = 0;
  1467. if( ModePlanar != mode && useHeuristics )
  1468. {
  1469. auto ro0 = coR;
  1470. auto go0 = coG;
  1471. auto bo0 = coB;
  1472. auto ro1 = ( ro0 >> 4 ) | ( ro0 << 2 );
  1473. auto go1 = ( go0 >> 6 ) | ( go0 << 1 );
  1474. auto bo1 = ( bo0 >> 4 ) | ( bo0 << 2 );
  1475. auto ro2 = ( ro1 << 2 ) + 2;
  1476. auto go2 = ( go1 << 2 ) + 2;
  1477. auto bo2 = ( bo1 << 2 ) + 2;
  1478. auto rh0 = chR;
  1479. auto gh0 = chG;
  1480. auto bh0 = chB;
  1481. auto rh1 = ( rh0 >> 4 ) | ( rh0 << 2 );
  1482. auto gh1 = ( gh0 >> 6 ) | ( gh0 << 1 );
  1483. auto bh1 = ( bh0 >> 4 ) | ( bh0 << 2 );
  1484. auto rh2 = rh1 - ro1;
  1485. auto gh2 = gh1 - go1;
  1486. auto bh2 = bh1 - bo1;
  1487. auto rv0 = cvR;
  1488. auto gv0 = cvG;
  1489. auto bv0 = cvB;
  1490. auto rv1 = ( rv0 >> 4 ) | ( rv0 << 2 );
  1491. auto gv1 = ( gv0 >> 6 ) | ( gv0 << 1 );
  1492. auto bv1 = ( bv0 >> 4 ) | ( bv0 << 2 );
  1493. auto rv2 = rv1 - ro1;
  1494. auto gv2 = gv1 - go1;
  1495. auto bv2 = bv1 - bo1;
  1496. for( int i = 0; i < 16; ++i )
  1497. {
  1498. int32_t cR = clampu8( ( rh2 * ( i / 4 ) + rv2 * ( i % 4 ) + ro2 ) >> 2 );
  1499. int32_t cG = clampu8( ( gh2 * ( i / 4 ) + gv2 * ( i % 4 ) + go2 ) >> 2 );
  1500. int32_t cB = clampu8( ( bh2 * ( i / 4 ) + bv2 * ( i % 4 ) + bo2 ) >> 2 );
  1501. int32_t difB = static_cast<int>( src[i * 4 + 0] ) - cB;
  1502. int32_t difG = static_cast<int>( src[i * 4 + 1] ) - cG;
  1503. int32_t difR = static_cast<int>( src[i * 4 + 2] ) - cR;
  1504. int32_t dif = difR * 38 + difG * 76 + difB * 14;
  1505. error += dif * dif;
  1506. }
  1507. }
  1508. /**/
  1509. uint32_t rgbv = cvB | ( cvG << 6 ) | ( cvR << 13 );
  1510. uint32_t rgbh = chB | ( chG << 6 ) | ( chR << 13 );
  1511. uint32_t hi = rgbv | ( ( rgbh & 0x1FFF ) << 19 );
  1512. uint32_t lo = ( chR & 0x1 ) | 0x2 | ( ( chR << 1 ) & 0x7C );
  1513. lo |= ( ( coB & 0x07 ) << 7 ) | ( ( coB & 0x18 ) << 8 ) | ( ( coB & 0x20 ) << 11 );
  1514. lo |= ( ( coG & 0x3F ) << 17 ) | ( ( coG & 0x40 ) << 18 );
  1515. lo |= coR << 25;
  1516. const auto idx = ( coR & 0x20 ) | ( ( coG & 0x20 ) >> 1 ) | ( ( coB & 0x1E ) >> 1 );
  1517. lo |= g_flags[idx];
  1518. uint64_t result = static_cast<uint32_t>( _bswap( lo ) );
  1519. result |= static_cast<uint64_t>( static_cast<uint32_t>( _bswap( hi ) ) ) << 32;
  1520. return std::make_pair( result, error );
  1521. }
  1522. #ifdef __ARM_NEON
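// NEON helpers for the planar fit: DifXZ/DifYZ accumulate the x- and y-weighted color
// differences and SumWide broadcasts the per-channel pixel sum, mirroring the scalar loops
// in Planar() above.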
  1523. static etcpak_force_inline int32x2_t Planar_NEON_DifXZ( int16x8_t dif_lo, int16x8_t dif_hi )
  1524. {
  1525. int32x4_t dif0 = vmull_n_s16( vget_low_s16( dif_lo ), -255 );
  1526. int32x4_t dif1 = vmull_n_s16( vget_high_s16( dif_lo ), -85 );
  1527. int32x4_t dif2 = vmull_n_s16( vget_low_s16( dif_hi ), 85 );
  1528. int32x4_t dif3 = vmull_n_s16( vget_high_s16( dif_hi ), 255 );
  1529. int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );
  1530. #ifndef __aarch64__
  1531. int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
  1532. return vpadd_s32( dif5, dif5 );
  1533. #else
  1534. return vdup_n_s32( vaddvq_s32( dif4 ) );
  1535. #endif
  1536. }
  1537. static etcpak_force_inline int32x2_t Planar_NEON_DifYZ( int16x8_t dif_lo, int16x8_t dif_hi )
  1538. {
  1539. int16x4_t scaling = { -255, -85, 85, 255 };
  1540. int32x4_t dif0 = vmull_s16( vget_low_s16( dif_lo ), scaling );
  1541. int32x4_t dif1 = vmull_s16( vget_high_s16( dif_lo ), scaling );
  1542. int32x4_t dif2 = vmull_s16( vget_low_s16( dif_hi ), scaling );
  1543. int32x4_t dif3 = vmull_s16( vget_high_s16( dif_hi ), scaling );
  1544. int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );
  1545. #ifndef __aarch64__
  1546. int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
  1547. return vpadd_s32( dif5, dif5 );
  1548. #else
  1549. return vdup_n_s32( vaddvq_s32( dif4 ) );
  1550. #endif
  1551. }
  1552. static etcpak_force_inline int16x8_t Planar_NEON_SumWide( uint8x16_t src )
  1553. {
  1554. uint16x8_t accu8 = vpaddlq_u8( src );
  1555. #ifndef __aarch64__
  1556. uint16x4_t accu4 = vpadd_u16( vget_low_u16( accu8 ), vget_high_u16( accu8 ) );
  1557. uint16x4_t accu2 = vpadd_u16( accu4, accu4 );
  1558. uint16x4_t accu1 = vpadd_u16( accu2, accu2 );
  1559. return vreinterpretq_s16_u16( vcombine_u16( accu1, accu1 ) );
  1560. #else
  1561. return vdupq_n_s16( vaddvq_u16( accu8 ) );
  1562. #endif
  1563. }
  1564. static etcpak_force_inline int16x8_t convert6_NEON( int32x4_t lo, int32x4_t hi )
  1565. {
  1566. uint16x8_t x = vcombine_u16( vqmovun_s32( lo ), vqmovun_s32( hi ) );
  1567. int16x8_t i = vreinterpretq_s16_u16( vshrq_n_u16( vqshlq_n_u16( x, 6 ), 6) ); // clamp 0-1023
  1568. i = vhsubq_s16( i, vdupq_n_s16( 15 ) );
  1569. int16x8_t ip11 = vaddq_s16( i, vdupq_n_s16( 11 ) );
  1570. int16x8_t ip4 = vaddq_s16( i, vdupq_n_s16( 4 ) );
  1571. return vshrq_n_s16( vsubq_s16( vsubq_s16( ip11, vshrq_n_s16( ip11, 7 ) ), vshrq_n_s16( ip4, 7) ), 3 );
  1572. }
  1573. static etcpak_force_inline int16x4_t convert7_NEON( int32x4_t x )
  1574. {
  1575. int16x4_t i = vreinterpret_s16_u16( vshr_n_u16( vqshl_n_u16( vqmovun_s32( x ), 6 ), 6 ) ); // clamp 0-1023
  1576. i = vhsub_s16( i, vdup_n_s16( 15 ) );
  1577. int16x4_t p9 = vadd_s16( i, vdup_n_s16( 9 ) );
  1578. int16x4_t p6 = vadd_s16( i, vdup_n_s16( 6 ) );
  1579. return vshr_n_s16( vsub_s16( vsub_s16( p9, vshr_n_s16( p9, 8 ) ), vshr_n_s16( p6, 8 ) ), 2 );
  1580. }
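// NEON port of Planar(): same least-squares fit, R6G7B6 quantization, optional error
// estimate and block packing as the scalar version above.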
  1581. static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar_NEON( const uint8_t* src, const uint8_t mode, bool useHeuristics )
  1582. {
  1583. uint8x16x4_t srcBlock = vld4q_u8( src );
  1584. int16x8_t bSumWide = Planar_NEON_SumWide( srcBlock.val[0] );
  1585. int16x8_t gSumWide = Planar_NEON_SumWide( srcBlock.val[1] );
  1586. int16x8_t rSumWide = Planar_NEON_SumWide( srcBlock.val[2] );
  1587. int16x8_t dif_R_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[2] ), 4) ), rSumWide );
  1588. int16x8_t dif_R_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[2] ), 4) ), rSumWide );
  1589. int16x8_t dif_G_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[1] ), 4 ) ), gSumWide );
  1590. int16x8_t dif_G_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[1] ), 4 ) ), gSumWide );
  1591. int16x8_t dif_B_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[0] ), 4) ), bSumWide );
  1592. int16x8_t dif_B_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[0] ), 4) ), bSumWide );
  1593. int32x2x2_t dif_xz_z = vzip_s32( vzip_s32( Planar_NEON_DifXZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifXZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifXZ( dif_G_lo, dif_G_hi ) );
  1594. int32x4_t dif_xz = vcombine_s32( dif_xz_z.val[0], dif_xz_z.val[1] );
  1595. int32x2x2_t dif_yz_z = vzip_s32( vzip_s32( Planar_NEON_DifYZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifYZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifYZ( dif_G_lo, dif_G_hi ) );
  1596. int32x4_t dif_yz = vcombine_s32( dif_yz_z.val[0], dif_yz_z.val[1] );
  1597. const float fscale = -4.0f / ( (255 * 255 * 8.0f + 85 * 85 * 8.0f ) * 16.0f );
  1598. float32x4_t fa = vmulq_n_f32( vcvtq_f32_s32( dif_xz ), fscale );
  1599. float32x4_t fb = vmulq_n_f32( vcvtq_f32_s32( dif_yz ), fscale );
  1600. int16x4_t bgrgSum = vzip_s16( vzip_s16( vget_low_s16( bSumWide ), vget_low_s16( rSumWide ) ).val[0], vget_low_s16( gSumWide ) ).val[0];
  1601. float32x4_t fd = vmulq_n_f32( vcvtq_f32_s32( vmovl_s16( bgrgSum ) ), 4.0f / 16.0f);
  1602. float32x4_t cof = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, 255.0f );
  1603. float32x4_t chf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, -425.0f );
  1604. float32x4_t cvf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, -425.0f ), fa, 255.0f );
  1605. int32x4_t coi = vcvtq_s32_f32( cof );
  1606. int32x4_t chi = vcvtq_s32_f32( chf );
  1607. int32x4_t cvi = vcvtq_s32_f32( cvf );
  1608. int32x4x2_t tr_hv = vtrnq_s32( chi, cvi );
  1609. int32x4x2_t tr_o = vtrnq_s32( coi, coi );
  1610. int16x8_t c_hvoo_br_6 = convert6_NEON( tr_hv.val[0], tr_o.val[0] );
  1611. int16x4_t c_hvox_g_7 = convert7_NEON( vcombine_s32( vget_low_s32( tr_hv.val[1] ), vget_low_s32( tr_o.val[1] ) ) );
  1612. int16x8_t c_hvoo_br_8 = vorrq_s16( vshrq_n_s16( c_hvoo_br_6, 4 ), vshlq_n_s16( c_hvoo_br_6, 2 ) );
  1613. int16x4_t c_hvox_g_8 = vorr_s16( vshr_n_s16( c_hvox_g_7, 6 ), vshl_n_s16( c_hvox_g_7, 1 ) );
  1614. uint64_t error = 0;
  1615. if( mode != ModePlanar && useHeuristics )
  1616. {
  1617. int16x4_t rec_gxbr_o = vext_s16( c_hvox_g_8, vget_high_s16( c_hvoo_br_8 ), 3 );
  1618. rec_gxbr_o = vadd_s16( vshl_n_s16( rec_gxbr_o, 2 ), vdup_n_s16( 2 ) );
  1619. int16x8_t rec_ro_wide = vdupq_lane_s16( rec_gxbr_o, 3 );
  1620. int16x8_t rec_go_wide = vdupq_lane_s16( rec_gxbr_o, 0 );
  1621. int16x8_t rec_bo_wide = vdupq_lane_s16( rec_gxbr_o, 1 );
  1622. int16x4_t br_hv2 = vsub_s16( vget_low_s16( c_hvoo_br_8 ), vget_high_s16( c_hvoo_br_8 ) );
  1623. int16x4_t gg_hv2 = vsub_s16( c_hvox_g_8, vdup_lane_s16( c_hvox_g_8, 2 ) );
  1624. int16x8_t scaleh_lo = { 0, 0, 0, 0, 1, 1, 1, 1 };
  1625. int16x8_t scaleh_hi = { 2, 2, 2, 2, 3, 3, 3, 3 };
  1626. int16x8_t scalev = { 0, 1, 2, 3, 0, 1, 2, 3 };
  1627. int16x8_t rec_r_1 = vmlaq_lane_s16( rec_ro_wide, scalev, br_hv2, 3 );
  1628. int16x8_t rec_r_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_lo, br_hv2, 2 ), 2 ) ) );
  1629. int16x8_t rec_r_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_hi, br_hv2, 2 ), 2 ) ) );
  1630. int16x8_t rec_b_1 = vmlaq_lane_s16( rec_bo_wide, scalev, br_hv2, 1 );
  1631. int16x8_t rec_b_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_lo, br_hv2, 0 ), 2 ) ) );
  1632. int16x8_t rec_b_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_hi, br_hv2, 0 ), 2 ) ) );
  1633. int16x8_t rec_g_1 = vmlaq_lane_s16( rec_go_wide, scalev, gg_hv2, 1 );
  1634. int16x8_t rec_g_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_lo, gg_hv2, 0 ), 2 ) ) );
  1635. int16x8_t rec_g_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_hi, gg_hv2, 0 ), 2 ) ) );
  1636. int16x8_t dif_r_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[2] ) ) ), rec_r_lo );
  1637. int16x8_t dif_r_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[2] ) ) ), rec_r_hi );
  1638. int16x8_t dif_g_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[1] ) ) ), rec_g_lo );
  1639. int16x8_t dif_g_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[1] ) ) ), rec_g_hi );
  1640. int16x8_t dif_b_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[0] ) ) ), rec_b_lo );
  1641. int16x8_t dif_b_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[0] ) ) ), rec_b_hi );
  1642. int16x8_t dif_lo = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_lo, 38 ), dif_g_lo, 76 ), dif_b_lo, 14 );
  1643. int16x8_t dif_hi = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_hi, 38 ), dif_g_hi, 76 ), dif_b_hi, 14 );
  1644. int16x4_t tmpDif = vget_low_s16( dif_lo );
  1645. int32x4_t difsq_0 = vmull_s16( tmpDif, tmpDif );
  1646. tmpDif = vget_high_s16( dif_lo );
  1647. int32x4_t difsq_1 = vmull_s16( tmpDif, tmpDif );
  1648. tmpDif = vget_low_s16( dif_hi );
  1649. int32x4_t difsq_2 = vmull_s16( tmpDif, tmpDif );
  1650. tmpDif = vget_high_s16( dif_hi );
  1651. int32x4_t difsq_3 = vmull_s16( tmpDif, tmpDif );
  1652. uint32x4_t difsq_5 = vaddq_u32( vreinterpretq_u32_s32( difsq_0 ), vreinterpretq_u32_s32( difsq_1 ) );
  1653. uint32x4_t difsq_6 = vaddq_u32( vreinterpretq_u32_s32( difsq_2 ), vreinterpretq_u32_s32( difsq_3 ) );
  1654. uint64x2_t difsq_7 = vaddl_u32( vget_low_u32( difsq_5 ), vget_high_u32( difsq_5 ) );
  1655. uint64x2_t difsq_8 = vaddl_u32( vget_low_u32( difsq_6 ), vget_high_u32( difsq_6 ) );
  1656. uint64x2_t difsq_9 = vaddq_u64( difsq_7, difsq_8 );
  1657. #ifdef __aarch64__
  1658. error = vaddvq_u64( difsq_9 );
  1659. #else
  1660. error = vgetq_lane_u64( difsq_9, 0 ) + vgetq_lane_u64( difsq_9, 1 );
  1661. #endif
  1662. }
  1663. int32_t coR = c_hvoo_br_6[6];
  1664. int32_t coG = c_hvox_g_7[2];
  1665. int32_t coB = c_hvoo_br_6[4];
  1666. int32_t chR = c_hvoo_br_6[2];
  1667. int32_t chG = c_hvox_g_7[0];
  1668. int32_t chB = c_hvoo_br_6[0];
  1669. int32_t cvR = c_hvoo_br_6[3];
  1670. int32_t cvG = c_hvox_g_7[1];
  1671. int32_t cvB = c_hvoo_br_6[1];
  1672. uint32_t rgbv = cvB | ( cvG << 6 ) | ( cvR << 13 );
  1673. uint32_t rgbh = chB | ( chG << 6 ) | ( chR << 13 );
  1674. uint32_t hi = rgbv | ( ( rgbh & 0x1FFF ) << 19 );
  1675. uint32_t lo = ( chR & 0x1 ) | 0x2 | ( ( chR << 1 ) & 0x7C );
  1676. lo |= ( ( coB & 0x07 ) << 7 ) | ( ( coB & 0x18 ) << 8 ) | ( ( coB & 0x20 ) << 11 );
  1677. lo |= ( ( coG & 0x3F) << 17) | ( (coG & 0x40 ) << 18 );
  1678. lo |= coR << 25;
  1679. const auto idx = ( coR & 0x20 ) | ( ( coG & 0x20 ) >> 1 ) | ( ( coB & 0x1E ) >> 1 );
  1680. lo |= g_flags[idx];
  1681. uint64_t result = static_cast<uint32_t>( _bswap(lo) );
  1682. result |= static_cast<uint64_t>( static_cast<uint32_t>( _bswap( hi ) ) ) << 32;
  1683. return std::make_pair( result, error );
  1684. }
  1685. #endif
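// Evaluates the T/H-mode distance candidates: for each distance, builds the four paint
// colors, assigns every pixel to its closest color using the luma-weighted metric and
// accumulates the block error, keeping the distance and pixel indices with the lowest
// error and stopping early once further distances stop improving.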
  1686. #ifdef __AVX2__
  1687. uint32_t calculateErrorTH( bool tMode, uint8_t( colorsRGB444 )[2][3], uint8_t& dist, uint32_t& pixIndices, uint8_t startDist, __m128i r8, __m128i g8, __m128i b8 )
  1688. #else
  1689. uint32_t calculateErrorTH( bool tMode, uint8_t* src, uint8_t( colorsRGB444 )[2][3], uint8_t& dist, uint32_t& pixIndices, uint8_t startDist )
  1690. #endif
  1691. {
  1692. uint32_t blockErr = 0, bestBlockErr = MaxError;
  1693. uint32_t pixColors;
  1694. uint8_t possibleColors[4][3];
  1695. uint8_t colors[2][3];
  1696. decompressColor( colorsRGB444, colors );
  1697. #ifdef __AVX2__
  1698. __m128i reverseMask = _mm_set_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 );
  1699. #endif
  1700. // test distances
  1701. for( uint8_t d = startDist; d < 8; ++d )
  1702. {
  1703. if( d >= 2 && dist == d - 2 ) break;
  1704. blockErr = 0;
  1705. pixColors = 0;
  1706. if( tMode )
  1707. {
  1708. calculatePaintColors59T( d, colors, possibleColors );
  1709. }
  1710. else
  1711. {
  1712. calculatePaintColors58H( d, colors, possibleColors );
  1713. }
  1714. #ifdef __AVX2__
  1715. // RGB ordering
  1716. __m128i b8Rev = _mm_shuffle_epi8( b8, reverseMask );
  1717. __m128i g8Rev = _mm_shuffle_epi8( g8, reverseMask );
  1718. __m128i r8Rev = _mm_shuffle_epi8( r8, reverseMask );
1719. // extends 3x128-bit RGB into 3x256-bit RGB for error comparisons
  1720. static const __m128i zero = _mm_setzero_si128();
  1721. __m128i b8Lo = _mm_unpacklo_epi8( b8Rev, zero );
  1722. __m128i g8Lo = _mm_unpacklo_epi8( g8Rev, zero );
  1723. __m128i r8Lo = _mm_unpacklo_epi8( r8Rev, zero );
  1724. __m128i b8Hi = _mm_unpackhi_epi8( b8Rev, zero );
  1725. __m128i g8Hi = _mm_unpackhi_epi8( g8Rev, zero );
  1726. __m128i r8Hi = _mm_unpackhi_epi8( r8Rev, zero );
  1727. __m256i b8 = _mm256_set_m128i( b8Hi, b8Lo );
  1728. __m256i g8 = _mm256_set_m128i( g8Hi, g8Lo );
  1729. __m256i r8 = _mm256_set_m128i( r8Hi, r8Lo );
1730. // calculates differences between the pixel colors and the palette colors
  1731. __m256i diffb = _mm256_abs_epi16( _mm256_sub_epi16( b8, _mm256_set1_epi16( possibleColors[0][B] ) ) );
  1732. __m256i diffg = _mm256_abs_epi16( _mm256_sub_epi16( g8, _mm256_set1_epi16( possibleColors[0][G] ) ) );
  1733. __m256i diffr = _mm256_abs_epi16( _mm256_sub_epi16( r8, _mm256_set1_epi16( possibleColors[0][R] ) ) );
  1734. // luma-based error calculations
  1735. static const __m256i bWeight = _mm256_set1_epi16( 14 );
  1736. static const __m256i gWeight = _mm256_set1_epi16( 76 );
  1737. static const __m256i rWeight = _mm256_set1_epi16( 38 );
  1738. diffb = _mm256_mullo_epi16( diffb, bWeight );
  1739. diffg = _mm256_mullo_epi16( diffg, gWeight );
  1740. diffr = _mm256_mullo_epi16( diffr, rWeight );
  1741. // obtains the error with the current palette color
  1742. __m256i lowestPixErr = _mm256_add_epi16( _mm256_add_epi16( diffb, diffg ), diffr );
1743. // error calculations with the remaining three palette colors
  1744. static const uint32_t masks[4] = { 0, 0x55555555, 0xAAAAAAAA, 0xFFFFFFFF };
  1745. for( uint8_t c = 1; c < 4; c++ )
  1746. {
  1747. __m256i diffb = _mm256_abs_epi16( _mm256_sub_epi16( b8, _mm256_set1_epi16( possibleColors[c][B] ) ) );
  1748. __m256i diffg = _mm256_abs_epi16( _mm256_sub_epi16( g8, _mm256_set1_epi16( possibleColors[c][G] ) ) );
  1749. __m256i diffr = _mm256_abs_epi16( _mm256_sub_epi16( r8, _mm256_set1_epi16( possibleColors[c][R] ) ) );
  1750. diffb = _mm256_mullo_epi16( diffb, bWeight );
  1751. diffg = _mm256_mullo_epi16( diffg, gWeight );
  1752. diffr = _mm256_mullo_epi16( diffr, rWeight );
  1753. // error comparison with the previous best color
  1754. __m256i pixErrors = _mm256_add_epi16( _mm256_add_epi16( diffb, diffg ), diffr );
  1755. __m256i minErr = _mm256_min_epu16( lowestPixErr, pixErrors );
  1756. __m256i cmpRes = _mm256_cmpeq_epi16( pixErrors, minErr );
  1757. lowestPixErr = minErr;
  1758. // update pixel colors
  1759. uint32_t updPixColors = _mm256_movemask_epi8( cmpRes );
  1760. uint32_t prevPixColors = pixColors & ~updPixColors;
  1761. uint32_t mskPixColors = masks[c] & updPixColors;
  1762. pixColors = prevPixColors | mskPixColors;
  1763. }
  1764. // accumulate the block error
  1765. alignas( 32 ) uint16_t pixErr16[16] = { 0, };
  1766. _mm256_storeu_si256( (__m256i*)pixErr16, lowestPixErr );
  1767. for( uint8_t p = 0; p < 16; p++ )
  1768. {
  1769. blockErr += (int)( pixErr16[p] ) * pixErr16[p];
  1770. }
  1771. #else
  1772. for( size_t y = 0; y < 4; ++y )
  1773. {
  1774. for( size_t x = 0; x < 4; ++x )
  1775. {
  1776. uint32_t bestPixErr = MaxError;
  1777. pixColors <<= 2; // Make room for next value
  1778. // Loop possible block colors
  1779. for( uint8_t c = 0; c < 4; ++c )
  1780. {
  1781. int diff[3];
  1782. diff[R] = src[4 * ( x * 4 + y ) + R] - possibleColors[c][R];
  1783. diff[G] = src[4 * ( x * 4 + y ) + G] - possibleColors[c][G];
  1784. diff[B] = src[4 * ( x * 4 + y ) + B] - possibleColors[c][B];
  1785. const uint32_t err = 38 * abs( diff[R] ) + 76 * abs( diff[G] ) + 14 * abs( diff[B] );
  1786. uint32_t pixErr = err * err;
  1787. // Choose best error
  1788. if( pixErr < bestPixErr )
  1789. {
  1790. bestPixErr = pixErr;
1791. pixColors ^= ( pixColors & 3 ); // Reset the first two bits
  1792. pixColors |= c;
  1793. }
  1794. }
  1795. blockErr += bestPixErr;
  1796. }
  1797. }
  1798. #endif
  1799. if( blockErr < bestBlockErr )
  1800. {
  1801. bestBlockErr = blockErr;
  1802. dist = d;
  1803. pixIndices = pixColors;
  1804. }
  1805. }
  1806. return bestBlockErr;
  1807. }
  1808. // main T-/H-mode compression function
  1809. #ifdef __AVX2__
  1810. uint32_t compressBlockTH( uint8_t* src, Luma& l, uint32_t& compressed1, uint32_t& compressed2, bool& tMode, __m128i r8, __m128i g8, __m128i b8 )
  1811. #else
  1812. uint32_t compressBlockTH( uint8_t *src, Luma& l, uint32_t& compressed1, uint32_t& compressed2, bool &tMode )
  1813. #endif
  1814. {
  1815. #ifdef __AVX2__
  1816. alignas( 8 ) uint8_t luma[16] = { 0, };
  1817. _mm_storeu_si128 ( (__m128i* )luma, l.luma8 );
  1818. #elif defined __ARM_NEON && defined __aarch64__
  1819. alignas( 8 ) uint8_t luma[16] = { 0 };
  1820. vst1q_u8( luma, l.luma8 );
  1821. #else
  1822. uint8_t* luma = l.val;
  1823. #endif
  1824. uint8_t pixIdx[16] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
  1825. // 1) sorts the pairs of (luma, pix_idx)
  1826. insertionSort( luma, pixIdx );
  1827. // 2) finds the min (left+right)
  1828. uint8_t minSumRangeIdx = 0;
  1829. uint16_t minSumRangeValue;
  1830. uint16_t sum;
  1831. static const uint8_t diffBonus[15] = {8, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 8};
  1832. const int16_t temp = luma[15] - luma[0];
  1833. minSumRangeValue = luma[15] - luma[1] + diffBonus[0];
  1834. for( uint8_t i = 1; i < 14; i++ )
  1835. {
  1836. sum = temp - luma[i+1] + luma[i] + diffBonus[i];
  1837. if( minSumRangeValue > sum )
  1838. {
  1839. minSumRangeValue = sum;
  1840. minSumRangeIdx = i;
  1841. }
  1842. }
  1843. sum = luma[14] - luma[0] + diffBonus[14];
  1844. if( minSumRangeValue > sum )
  1845. {
  1846. minSumRangeValue = sum;
  1847. minSumRangeIdx = 14;
  1848. }
  1849. uint8_t lRange, rRange;
  1850. lRange = luma[minSumRangeIdx] - luma[0];
  1851. rRange = luma[15] - luma[minSumRangeIdx + 1];
  1852. // 3) sets a proper mode
  1853. bool swap = false;
  1854. if( lRange >= rRange )
  1855. {
  1856. if( lRange >= rRange * 2 )
  1857. {
  1858. swap = true;
  1859. tMode = true;
  1860. }
  1861. }
  1862. else
  1863. {
  1864. if( lRange * 2 <= rRange ) tMode = true;
  1865. }
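// T-mode is picked when one partition's spread is at least twice the other's;
// swap marks the case where the lower (darker) partition is the wide one, so the
// roles of the two base colors are exchanged below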
  1866. // 4) calculates the two base colors
  1867. uint8_t rangeIdx[4] = { pixIdx[0], pixIdx[minSumRangeIdx], pixIdx[minSumRangeIdx + 1], pixIdx[15] };
  1868. uint16_t r[4], g[4], b[4];
  1869. for( uint8_t i = 0; i < 4; ++i )
  1870. {
  1871. uint8_t idx = rangeIdx[i] * 4;
  1872. b[i] = src[idx];
  1873. g[i] = src[idx + 1];
  1874. r[i] = src[idx + 2];
  1875. }
  1876. uint8_t mid_rgb[2][3];
  1877. if( swap )
  1878. {
  1879. mid_rgb[1][B] = ( b[0] + b[1] ) / 2;
  1880. mid_rgb[1][G] = ( g[0] + g[1] ) / 2;
  1881. mid_rgb[1][R] = ( r[0] + r[1] ) / 2;
  1882. uint16_t sum_rgb[3] = { 0, 0, 0 };
  1883. for( uint8_t i = minSumRangeIdx + 1; i < 16; i++ )
  1884. {
  1885. uint8_t idx = pixIdx[i] * 4;
  1886. sum_rgb[B] += src[idx];
  1887. sum_rgb[G] += src[idx + 1];
  1888. sum_rgb[R] += src[idx + 2];
  1889. }
  1890. const uint8_t temp = 15 - minSumRangeIdx;
  1891. mid_rgb[0][B] = sum_rgb[B] / temp;
  1892. mid_rgb[0][G] = sum_rgb[G] / temp;
  1893. mid_rgb[0][R] = sum_rgb[R] / temp;
  1894. }
  1895. else
  1896. {
  1897. mid_rgb[0][B] = (b[0] + b[1]) / 2;
  1898. mid_rgb[0][G] = (g[0] + g[1]) / 2;
  1899. mid_rgb[0][R] = (r[0] + r[1]) / 2;
  1900. if( tMode )
  1901. {
  1902. uint16_t sum_rgb[3] = { 0, 0, 0 };
  1903. for( uint8_t i = minSumRangeIdx + 1; i < 16; i++ )
  1904. {
  1905. uint8_t idx = pixIdx[i] * 4;
  1906. sum_rgb[B] += src[idx];
  1907. sum_rgb[G] += src[idx + 1];
  1908. sum_rgb[R] += src[idx + 2];
  1909. }
  1910. const uint8_t temp = 15 - minSumRangeIdx;
  1911. mid_rgb[1][B] = sum_rgb[B] / temp;
  1912. mid_rgb[1][G] = sum_rgb[G] / temp;
  1913. mid_rgb[1][R] = sum_rgb[R] / temp;
  1914. }
  1915. else
  1916. {
  1917. mid_rgb[1][B] = (b[2] + b[3]) / 2;
  1918. mid_rgb[1][G] = (g[2] + g[3]) / 2;
  1919. mid_rgb[1][R] = (r[2] + r[3]) / 2;
  1920. }
  1921. }
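// mid_rgb[] now holds the two candidate base colors: endpoint midpoints for H-mode,
// or (in T-mode) one endpoint midpoint plus the mean of the other partition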
  1922. // 5) sets the start distance index
  1923. uint32_t startDistCandidate;
  1924. uint32_t avgDist;
  1925. if( tMode )
  1926. {
  1927. if( swap )
  1928. {
  1929. avgDist = ( b[1] - b[0] + g[1] - g[0] + r[1] - r[0] ) / 6;
  1930. }
  1931. else
  1932. {
  1933. avgDist = ( b[3] - b[2] + g[3] - g[2] + r[3] - r[2] ) / 6;
  1934. }
  1935. }
  1936. else
  1937. {
  1938. avgDist = ( b[1] - b[0] + g[1] - g[0] + r[1] - r[0] + b[3] - b[2] + g[3] - g[2] + r[3] - r[2] ) / 12;
  1939. }
  1940. if( avgDist <= 16)
  1941. {
  1942. startDistCandidate = 0;
  1943. }
  1944. else if( avgDist <= 23 )
  1945. {
  1946. startDistCandidate = 1;
  1947. }
  1948. else if( avgDist <= 32 )
  1949. {
  1950. startDistCandidate = 2;
  1951. }
  1952. else if( avgDist <= 41 )
  1953. {
  1954. startDistCandidate = 3;
  1955. }
  1956. else
  1957. {
  1958. startDistCandidate = 4;
  1959. }
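// map the average endpoint distance onto the ETC2 distance table so the error
// search below can start from the most plausible table entry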
  1960. uint32_t bestErr = MaxError;
  1961. uint32_t bestPixIndices;
  1962. uint8_t bestDist = 10;
  1963. uint8_t colorsRGB444[2][3];
  1964. compressColor( mid_rgb, colorsRGB444, tMode );
  1965. compressed1 = 0;
  1966. // 6) finds the best candidate with the lowest error
  1967. #ifdef __AVX2__
  1968. // Vectorized ver
  1969. bestErr = calculateErrorTH( tMode, colorsRGB444, bestDist, bestPixIndices, startDistCandidate, r8, g8, b8 );
  1970. #else
  1971. // Scalar ver
  1972. bestErr = calculateErrorTH( tMode, src, colorsRGB444, bestDist, bestPixIndices, startDistCandidate );
  1973. #endif
  1974. // 7) outputs the final T or H block
  1975. if( tMode )
  1976. {
  1977. // Put the compress params into the compression block
  1978. compressed1 |= ( colorsRGB444[0][R] & 0xf ) << 23;
  1979. compressed1 |= ( colorsRGB444[0][G] & 0xf ) << 19;
  1980. compressed1 |= ( colorsRGB444[0][B] ) << 15;
  1981. compressed1 |= ( colorsRGB444[1][R] ) << 11;
  1982. compressed1 |= ( colorsRGB444[1][G] ) << 7;
  1983. compressed1 |= ( colorsRGB444[1][B] ) << 3;
  1984. compressed1 |= bestDist & 0x7;
  1985. }
  1986. else
  1987. {
  1988. int bestRGB444ColPacked[2];
  1989. bestRGB444ColPacked[0] = (colorsRGB444[0][R] << 8) + (colorsRGB444[0][G] << 4) + colorsRGB444[0][B];
  1990. bestRGB444ColPacked[1] = (colorsRGB444[1][R] << 8) + (colorsRGB444[1][G] << 4) + colorsRGB444[1][B];
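// in H-mode the low bit of the distance index is carried by the ordering of the two
// base colors; swap them (and remap the pixel indices) if the current ordering
// encodes the wrong bit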
  1991. if( ( bestRGB444ColPacked[0] >= bestRGB444ColPacked[1] ) ^ ( ( bestDist & 1 ) == 1 ) )
  1992. {
  1993. swapColors( colorsRGB444 );
// Reshuffle pixel indices to exchange C1 with C3, and C2 with C4
  1995. bestPixIndices = ( 0x55555555 & bestPixIndices ) | ( 0xaaaaaaaa & ( ~bestPixIndices ) );
  1996. }
  1997. // Put the compress params into the compression block
  1998. compressed1 |= ( colorsRGB444[0][R] & 0xf ) << 22;
  1999. compressed1 |= ( colorsRGB444[0][G] & 0xf ) << 18;
  2000. compressed1 |= ( colorsRGB444[0][B] & 0xf ) << 14;
  2001. compressed1 |= ( colorsRGB444[1][R] & 0xf ) << 10;
  2002. compressed1 |= ( colorsRGB444[1][G] & 0xf ) << 6;
  2003. compressed1 |= ( colorsRGB444[1][B] & 0xf ) << 2;
  2004. compressed1 |= ( bestDist >> 1 ) & 0x3;
  2005. }
  2006. bestPixIndices = indexConversion( bestPixIndices );
  2007. compressed2 = 0;
  2008. compressed2 = ( compressed2 & ~( ( 0x2 << 31 ) - 1 ) ) | ( bestPixIndices & ( ( 2 << 31 ) - 1 ) );
  2009. return bestErr;
  2010. }
  2011. //#endif
  2012. template<class T, class S>
  2013. static etcpak_force_inline uint64_t EncodeSelectors( uint64_t d, const T terr[2][8], const S tsel[16][8], const uint32_t* id, const uint64_t value, const uint64_t error)
  2014. {
  2015. size_t tidx[2];
  2016. tidx[0] = GetLeastError( terr[0], 8 );
  2017. tidx[1] = GetLeastError( terr[1], 8 );
  2018. if ((terr[0][tidx[0]] + terr[1][tidx[1]]) >= error)
  2019. {
  2020. return value;
  2021. }
  2022. d |= tidx[0] << 26;
  2023. d |= tidx[1] << 29;
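// each pixel's 2-bit selector is split: the low bit goes to bits 32..47, the high bit to bits 48..63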
  2024. for( int i=0; i<16; i++ )
  2025. {
  2026. uint64_t t = tsel[i][tidx[id[i]%2]];
  2027. d |= ( t & 0x1 ) << ( i + 32 );
  2028. d |= ( t & 0x2 ) << ( i + 47 );
  2029. }
  2030. return FixByteOrder(d);
  2031. }
  2032. }
  2033. static etcpak_force_inline uint64_t ProcessRGB( const uint8_t* src )
  2034. {
  2035. #ifdef __AVX2__
  2036. uint64_t d = CheckSolid_AVX2( src );
  2037. if( d != 0 ) return d;
  2038. alignas(32) v4i a[8];
  2039. __m128i err0 = PrepareAverages_AVX2( a, src );
  2040. // Get index of minimum error (err0)
  2041. __m128i err1 = _mm_shuffle_epi32(err0, _MM_SHUFFLE(2, 3, 0, 1));
  2042. __m128i errMin0 = _mm_min_epu32(err0, err1);
  2043. __m128i errMin1 = _mm_shuffle_epi32(errMin0, _MM_SHUFFLE(1, 0, 3, 2));
  2044. __m128i errMin2 = _mm_min_epu32(errMin1, errMin0);
  2045. __m128i errMask = _mm_cmpeq_epi32(errMin2, err0);
  2046. uint32_t mask = _mm_movemask_epi8(errMask);
  2047. uint32_t idx = _bit_scan_forward(mask) >> 2;
  2048. d |= EncodeAverages_AVX2( a, idx );
  2049. alignas(32) uint32_t terr[2][8] = {};
  2050. alignas(32) uint32_t tsel[8];
  2051. if ((idx == 0) || (idx == 2))
  2052. {
  2053. FindBestFit_4x2_AVX2( terr, tsel, a, idx * 2, src );
  2054. }
  2055. else
  2056. {
  2057. FindBestFit_2x4_AVX2( terr, tsel, a, idx * 2, src );
  2058. }
  2059. return EncodeSelectors_AVX2( d, terr, tsel, (idx % 2) == 1 );
  2060. #else
  2061. uint64_t d = CheckSolid( src );
  2062. if( d != 0 ) return d;
  2063. v4i a[8];
  2064. unsigned int err[4] = {};
  2065. PrepareAverages( a, src, err );
  2066. size_t idx = GetLeastError( err, 4 );
  2067. EncodeAverages( d, a, idx );
  2068. #if ( defined __SSE4_1__ || defined __ARM_NEON ) && !defined REFERENCE_IMPLEMENTATION
  2069. uint32_t terr[2][8] = {};
  2070. #else
  2071. uint64_t terr[2][8] = {};
  2072. #endif
  2073. uint16_t tsel[16][8];
  2074. auto id = g_id[idx];
  2075. FindBestFit( terr, tsel, a, id, src );
  2076. return FixByteOrder( EncodeSelectors( d, terr, tsel, id ) );
  2077. #endif
  2078. }
  2079. #ifdef __AVX2__
  2080. // horizontal min/max functions. https://stackoverflow.com/questions/22256525/horizontal-minimum-and-maximum-using-sse
// if a build error occurs with GCC, change the value of -march in CFLAGS to a specific value for your CPU (e.g., skylake).
  2082. static inline int16_t hMax( __m128i buffer, uint8_t& idx )
  2083. {
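// SSE has no horizontal byte max, so invert the bytes (255 - x), fold the byte pairs,
// use _mm_minpos_epu16, then invert the result back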
  2084. __m128i tmp1 = _mm_sub_epi8( _mm_set1_epi8( (char)( 255 ) ), buffer );
  2085. __m128i tmp2 = _mm_min_epu8( tmp1, _mm_srli_epi16( tmp1, 8 ) );
  2086. __m128i tmp3 = _mm_minpos_epu16( tmp2 );
  2087. uint8_t result = 255 - (uint8_t)_mm_cvtsi128_si32( tmp3 );
  2088. __m128i mask = _mm_cmpeq_epi8( buffer, _mm_set1_epi8( result ) );
  2089. idx = _tzcnt_u32( _mm_movemask_epi8( mask ) );
  2090. return result;
  2091. }
  2092. #elif defined __ARM_NEON && defined __aarch64__
  2093. static inline int16_t hMax( uint8x16_t buffer, uint8_t& idx )
  2094. {
  2095. const uint8_t max = vmaxvq_u8( buffer );
  2096. const uint16x8_t vmax = vdupq_n_u16( max );
  2097. uint8x16x2_t buff_wide = vzipq_u8( buffer, uint8x16_t() );
  2098. uint16x8_t lowbuf16 = vreinterpretq_u16_u8( buff_wide.val[0] );
  2099. uint16x8_t hibuf16 = vreinterpretq_u16_u8( buff_wide.val[1] );
  2100. uint16x8_t low_eqmask = vceqq_u16( lowbuf16, vmax );
  2101. uint16x8_t hi_eqmask = vceqq_u16( hibuf16, vmax );
  2102. static const uint16_t mask_lsb[] = {
  2103. 0x1, 0x2, 0x4, 0x8,
  2104. 0x10, 0x20, 0x40, 0x80 };
  2105. static const uint16_t mask_msb[] = {
  2106. 0x100, 0x200, 0x400, 0x800,
  2107. 0x1000, 0x2000, 0x4000, 0x8000 };
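// NEON has no movemask: AND the equality masks with one-hot lane constants and reduce
// with pairwise adds to build a 16-bit position bitmask, then take ctz to get the index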
  2108. uint16x8_t vmask_lsb = vld1q_u16( mask_lsb );
  2109. uint16x8_t vmask_msb = vld1q_u16( mask_msb );
  2110. uint16x8_t pos_lsb = vandq_u16( vmask_lsb, low_eqmask );
  2111. uint16x8_t pos_msb = vandq_u16( vmask_msb, hi_eqmask );
  2112. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2113. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2114. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2115. uint64_t idx_lane1 = vgetq_lane_u64( vreinterpretq_u64_u16( pos_lsb ), 0 );
  2116. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2117. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2118. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2119. uint32_t idx_lane2 = vgetq_lane_u32( vreinterpretq_u32_u16( pos_msb ), 0 );
  2120. idx = idx_lane1 != 0 ? __builtin_ctz( idx_lane1 ) : __builtin_ctz( idx_lane2 );
  2121. return max;
  2122. }
  2123. #endif
  2124. #ifdef __AVX2__
  2125. static inline int16_t hMin( __m128i buffer, uint8_t& idx )
  2126. {
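// fold each byte pair so the smaller byte sits in the low half of every 16-bit lane;
// _mm_minpos_epu16 then yields the smallest byte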
  2127. __m128i tmp2 = _mm_min_epu8( buffer, _mm_srli_epi16( buffer, 8 ) );
  2128. __m128i tmp3 = _mm_minpos_epu16( tmp2 );
  2129. uint8_t result = (uint8_t)_mm_cvtsi128_si32( tmp3 );
  2130. __m128i mask = _mm_cmpeq_epi8( buffer, _mm_set1_epi8( result ) );
  2131. idx = _tzcnt_u32( _mm_movemask_epi8( mask ) );
  2132. return result;
  2133. }
  2134. #elif defined __ARM_NEON && defined __aarch64__
  2135. static inline int16_t hMin( uint8x16_t buffer, uint8_t& idx )
  2136. {
  2137. const uint8_t min = vminvq_u8( buffer );
  2138. const uint16x8_t vmin = vdupq_n_u16( min );
  2139. uint8x16x2_t buff_wide = vzipq_u8( buffer, uint8x16_t() );
  2140. uint16x8_t lowbuf16 = vreinterpretq_u16_u8( buff_wide.val[0] );
  2141. uint16x8_t hibuf16 = vreinterpretq_u16_u8( buff_wide.val[1] );
  2142. uint16x8_t low_eqmask = vceqq_u16( lowbuf16, vmin );
  2143. uint16x8_t hi_eqmask = vceqq_u16( hibuf16, vmin );
  2144. static const uint16_t mask_lsb[] = {
  2145. 0x1, 0x2, 0x4, 0x8,
  2146. 0x10, 0x20, 0x40, 0x80 };
  2147. static const uint16_t mask_msb[] = {
  2148. 0x100, 0x200, 0x400, 0x800,
  2149. 0x1000, 0x2000, 0x4000, 0x8000 };
  2150. uint16x8_t vmask_lsb = vld1q_u16( mask_lsb );
  2151. uint16x8_t vmask_msb = vld1q_u16( mask_msb );
  2152. uint16x8_t pos_lsb = vandq_u16( vmask_lsb, low_eqmask );
  2153. uint16x8_t pos_msb = vandq_u16( vmask_msb, hi_eqmask );
  2154. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2155. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2156. pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
  2157. uint64_t idx_lane1 = vgetq_lane_u64( vreinterpretq_u64_u16( pos_lsb ), 0 );
  2158. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2159. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2160. pos_msb = vpaddq_u16( pos_msb, pos_msb );
  2161. uint32_t idx_lane2 = vgetq_lane_u32( vreinterpretq_u32_u16( pos_msb ), 0 );
  2162. idx = idx_lane1 != 0 ? __builtin_ctz( idx_lane1 ) : __builtin_ctz( idx_lane2 );
  2163. return min;
  2164. }
  2165. #endif
  2166. // During search it is not convenient to store the bits the way they are stored in the
  2167. // file format. Hence, after search, it is converted to this format.
  2168. // NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
  2169. static inline void stuff59bits( unsigned int thumbT59W1, unsigned int thumbT59W2, unsigned int& thumbTW1, unsigned int& thumbTW2 )
  2170. {
  2171. // Put bits in twotimer configuration for 59 (red overflows)
  2172. //
  2173. // Go from this bit layout:
  2174. //
  2175. // |63 62 61 60 59|58 57 56 55|54 53 52 51|50 49 48 47|46 45 44 43|42 41 40 39|38 37 36 35|34 33 32|
  2176. // |----empty-----|---red 0---|--green 0--|--blue 0---|---red 1---|--green 1--|--blue 1---|--dist--|
  2177. //
  2178. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2179. // |----------------------------------------index bits---------------------------------------------|
  2180. //
  2181. //
  2182. // To this:
  2183. //
  2184. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2185. // -----------------------------------------------------------------------------------------------
  2186. // |// // //|R0a |//|R0b |G0 |B0 |R1 |G1 |B1 |da |df|db|
  2187. // -----------------------------------------------------------------------------------------------
  2188. //
  2189. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2190. // |----------------------------------------index bits---------------------------------------------|
  2191. //
  2192. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2193. // -----------------------------------------------------------------------------------------------
  2194. // | base col1 | dcol 2 | base col1 | dcol 2 | base col 1 | dcol 2 | table | table |df|fp|
  2195. // | R1' (5 bits) | dR2 | G1' (5 bits) | dG2 | B1' (5 bits) | dB2 | cw 1 | cw 2 |bt|bt|
  2196. // ------------------------------------------------------------------------------------------------
  2197. uint8_t R0a;
  2198. uint8_t bit, a, b, c, d, bits;
  2199. R0a = ( thumbT59W1 >> 25 ) & 0x3;
  2200. // Fix middle part
  2201. thumbTW1 = thumbT59W1 << 1;
  2202. // Fix R0a (top two bits of R0)
  2203. thumbTW1 = ( thumbTW1 & ~( 0x3 << 27 ) ) | ( ( R0a & 0x3 ) << 27 );
  2204. // Fix db (lowest bit of d)
  2205. thumbTW1 = ( thumbTW1 & ~0x1 ) | ( thumbT59W1 & 0x1 );
  2206. // Make sure that red overflows:
  2207. a = ( thumbTW1 >> 28 ) & 0x1;
  2208. b = ( thumbTW1 >> 27 ) & 0x1;
  2209. c = ( thumbTW1 >> 25 ) & 0x1;
  2210. d = ( thumbTW1 >> 24 ) & 0x1;
// The following abcd bit sequences should be padded with ones: 0111, 1010, 1011, 1101, 1110, 1111
  2212. // The following logical expression checks for the presence of any of those:
  2213. bit = ( a & c ) | ( !a & b & c & d ) | ( a & b & !c & d );
  2214. bits = 0xf * bit;
  2215. thumbTW1 = ( thumbTW1 & ~( 0x7 << 29 ) ) | ( bits & 0x7 ) << 29;
  2216. thumbTW1 = ( thumbTW1 & ~( 0x1 << 26 ) ) | ( !bit & 0x1 ) << 26;
  2217. // Set diffbit
  2218. thumbTW1 = ( thumbTW1 & ~0x2 ) | 0x2;
  2219. thumbTW2 = thumbT59W2;
  2220. }
  2221. // During search it is not convenient to store the bits the way they are stored in the
  2222. // file format. Hence, after search, it is converted to this format.
  2223. // NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
  2224. static inline void stuff58bits( unsigned int thumbH58W1, unsigned int thumbH58W2, unsigned int& thumbHW1, unsigned int& thumbHW2 )
  2225. {
  2226. // Put bits in twotimer configuration for 58 (red doesn't overflow, green does)
  2227. //
  2228. // Go from this bit layout:
  2229. //
  2230. //
  2231. // |63 62 61 60 59 58|57 56 55 54|53 52 51 50|49 48 47 46|45 44 43 42|41 40 39 38|37 36 35 34|33 32|
  2232. // |-------empty-----|---red 0---|--green 0--|--blue 0---|---red 1---|--green 1--|--blue 1---|d2 d1|
  2233. //
  2234. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2235. // |---------------------------------------index bits----------------------------------------------|
  2236. //
  2237. // To this:
  2238. //
  2239. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2240. // -----------------------------------------------------------------------------------------------
// |//|R0 |G0 |// // //|G0|B0|//|B0b |R1 |G1 |B1 |d2|df|d1|
  2242. // -----------------------------------------------------------------------------------------------
  2243. //
  2244. // |31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 09 08 07 06 05 04 03 02 01 00|
  2245. // |---------------------------------------index bits----------------------------------------------|
  2246. //
  2247. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2248. // -----------------------------------------------------------------------------------------------
  2249. // | base col1 | dcol 2 | base col1 | dcol 2 | base col 1 | dcol 2 | table | table |df|fp|
  2250. // | R1' (5 bits) | dR2 | G1' (5 bits) | dG2 | B1' (5 bits) | dB2 | cw 1 | cw 2 |bt|bt|
  2251. // -----------------------------------------------------------------------------------------------
  2252. //
  2253. //
  2254. // Thus, what we are really doing is going from this bit layout:
  2255. //
  2256. //
  2257. // |63 62 61 60 59 58|57 56 55 54 53 52 51|50 49|48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33|32 |
  2258. // |-------empty-----|part0---------------|part1|part2------------------------------------------|part3|
  2259. //
  2260. // To this:
  2261. //
  2262. // 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32
  2263. // --------------------------------------------------------------------------------------------------|
  2264. // |//|part0 |// // //|part1|//|part2 |df|part3|
  2265. // --------------------------------------------------------------------------------------------------|
  2266. unsigned int part0, part1, part2, part3;
  2267. uint8_t bit, a, b, c, d, bits;
  2268. // move parts
  2269. part0 = ( thumbH58W1 >> 19 ) & 0x7f;
  2270. part1 = ( thumbH58W1 >> 17 ) & 0x3;
  2271. part2 = ( thumbH58W1 >> 1 ) & 0xffff;
  2272. part3 = thumbH58W1 & 0x1;
  2273. thumbHW1 = 0;
  2274. thumbHW1 = ( thumbHW1 & ~( 0x7f << 24 ) ) | ( ( part0 & 0x7f ) << 24 );
  2275. thumbHW1 = ( thumbHW1 & ~( 0x3 << 19 ) ) | ( ( part1 & 0x3 ) << 19 );
  2276. thumbHW1 = ( thumbHW1 & ~( 0xffff << 2 ) ) | ( ( part2 & 0xffff ) << 2 );
  2277. thumbHW1 = ( thumbHW1 & ~0x1 ) | ( part3 & 0x1 );
  2278. // Make sure that red does not overflow:
  2279. bit = ( thumbHW1 >> 30 ) & 0x1;
  2280. thumbHW1 = ( thumbHW1 & ~( 0x1 << 31 ) ) | ( ( !bit & 0x1 ) << 31 );
  2281. // Make sure that green overflows:
  2282. a = ( thumbHW1 >> 20 ) & 0x1;
  2283. b = ( thumbHW1 >> 19 ) & 0x1;
  2284. c = ( thumbHW1 >> 17 ) & 0x1;
  2285. d = ( thumbHW1 >> 16 ) & 0x1;
// The following abcd bit sequences should be padded with ones: 0111, 1010, 1011, 1101, 1110, 1111
  2287. // The following logical expression checks for the presence of any of those:
  2288. bit = ( a & c ) | ( !a & b & c & d ) | ( a & b & !c & d );
  2289. bits = 0xf * bit;
  2290. thumbHW1 = ( thumbHW1 & ~( 0x7 << 21 ) ) | ( ( bits & 0x7 ) << 21 );
  2291. thumbHW1 = ( thumbHW1 & ~( 0x1 << 18 ) ) | ( ( !bit & 0x1 ) << 18 );
  2292. // Set diffbit
  2293. thumbHW1 = ( thumbHW1 & ~0x2 ) | 0x2;
  2294. thumbHW2 = thumbH58W2;
  2295. }
  2296. #if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
  2297. static etcpak_force_inline Channels GetChannels( const uint8_t* src )
  2298. {
  2299. Channels ch;
  2300. #ifdef __AVX2__
  2301. __m128i d0 = _mm_loadu_si128( ( (__m128i*)src ) + 0 );
  2302. __m128i d1 = _mm_loadu_si128( ( (__m128i*)src ) + 1 );
  2303. __m128i d2 = _mm_loadu_si128( ( (__m128i*)src ) + 2 );
  2304. __m128i d3 = _mm_loadu_si128( ( (__m128i*)src ) + 3 );
  2305. __m128i rgb0 = _mm_shuffle_epi8( d0, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2306. __m128i rgb1 = _mm_shuffle_epi8( d1, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2307. __m128i rgb2 = _mm_shuffle_epi8( d2, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2308. __m128i rgb3 = _mm_shuffle_epi8( d3, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
  2309. __m128i rg0 = _mm_unpacklo_epi32( rgb0, rgb1 );
  2310. __m128i rg1 = _mm_unpacklo_epi32( rgb2, rgb3 );
  2311. __m128i b0 = _mm_unpackhi_epi32( rgb0, rgb1 );
  2312. __m128i b1 = _mm_unpackhi_epi32( rgb2, rgb3 );
  2313. // swap channels
  2314. ch.b8 = _mm_unpacklo_epi64( rg0, rg1 );
  2315. ch.g8 = _mm_unpackhi_epi64( rg0, rg1 );
  2316. ch.r8 = _mm_unpacklo_epi64( b0, b1 );
  2317. #elif defined __ARM_NEON && defined __aarch64__
  2318. //load pixel data into 4 rows
  2319. uint8x16_t px0 = vld1q_u8( src + 0 );
  2320. uint8x16_t px1 = vld1q_u8( src + 16 );
  2321. uint8x16_t px2 = vld1q_u8( src + 32 );
  2322. uint8x16_t px3 = vld1q_u8( src + 48 );
  2323. uint8x16x2_t px0z1 = vzipq_u8( px0, px1 );
  2324. uint8x16x2_t px2z3 = vzipq_u8( px2, px3 );
  2325. uint8x16x2_t px01 = vzipq_u8( px0z1.val[0], px0z1.val[1] );
  2326. uint8x16x2_t rgb01 = vzipq_u8( px01.val[0], px01.val[1] );
  2327. uint8x16x2_t px23 = vzipq_u8( px2z3.val[0], px2z3.val[1] );
  2328. uint8x16x2_t rgb23 = vzipq_u8( px23.val[0], px23.val[1] );
  2329. uint8x16_t rr = vreinterpretq_u8_u64( vzip1q_u64( vreinterpretq_u64_u8( rgb01.val[0] ), vreinterpretq_u64_u8( rgb23.val[0] ) ) );
  2330. uint8x16_t gg = vreinterpretq_u8_u64( vzip2q_u64( vreinterpretq_u64_u8( rgb01.val[0] ), vreinterpretq_u64_u8( rgb23.val[0] ) ) );
  2331. uint8x16_t bb = vreinterpretq_u8_u64( vzip1q_u64( vreinterpretq_u64_u8( rgb01.val[1] ), vreinterpretq_u64_u8( rgb23.val[1] ) ) );
  2332. uint8x16x2_t red = vzipq_u8( rr, uint8x16_t() );
  2333. uint8x16x2_t grn = vzipq_u8( gg, uint8x16_t() );
  2334. uint8x16x2_t blu = vzipq_u8( bb, uint8x16_t() );
  2335. ch.r = red;
  2336. ch.b = blu;
  2337. ch.g = grn;
  2338. #endif
  2339. return ch;
  2340. }
  2341. #endif
  2342. #if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
  2343. static etcpak_force_inline void CalculateLuma( Channels& ch, Luma& luma )
  2344. #else
  2345. static etcpak_force_inline void CalculateLuma( const uint8_t* src, Luma& luma )
  2346. #endif
  2347. {
  2348. #ifdef __AVX2__
  2349. __m256i b16_luma = _mm256_mullo_epi16( _mm256_cvtepu8_epi16( ch.b8 ), _mm256_set1_epi16( 14 ) );
  2350. __m256i g16_luma = _mm256_mullo_epi16( _mm256_cvtepu8_epi16( ch.g8 ), _mm256_set1_epi16( 76 ) );
  2351. __m256i r16_luma = _mm256_mullo_epi16( _mm256_cvtepu8_epi16( ch.r8 ), _mm256_set1_epi16( 38 ) );
  2352. __m256i luma_16bit = _mm256_add_epi16( _mm256_add_epi16( g16_luma, r16_luma ), b16_luma );
  2353. __m256i luma_8bit_m256i = _mm256_srli_epi16( luma_16bit, 7 );
  2354. __m128i luma_8bit_lo = _mm256_extractf128_si256( luma_8bit_m256i, 0 );
  2355. __m128i luma_8bit_hi = _mm256_extractf128_si256( luma_8bit_m256i, 1 );
  2356. static const __m128i interleaving_mask_lo = _mm_set_epi8( 15, 13, 11, 9, 7, 5, 3, 1, 14, 12, 10, 8, 6, 4, 2, 0 );
  2357. static const __m128i interleaving_mask_hi = _mm_set_epi8( 14, 12, 10, 8, 6, 4, 2, 0, 15, 13, 11, 9, 7, 5, 3, 1 );
  2358. __m128i luma_8bit_lo_moved = _mm_shuffle_epi8( luma_8bit_lo, interleaving_mask_lo );
  2359. __m128i luma_8bit_hi_moved = _mm_shuffle_epi8( luma_8bit_hi, interleaving_mask_hi );
  2360. __m128i luma_8bit = _mm_or_si128( luma_8bit_hi_moved, luma_8bit_lo_moved );
  2361. luma.luma8 = luma_8bit;
  2362. // min/max calculation
  2363. luma.min = hMin( luma_8bit, luma.minIdx ) * 0.00392156f;
  2364. luma.max = hMax( luma_8bit, luma.maxIdx ) * 0.00392156f;
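// 0.00392156f ~= 1/255: normalise min/max to [0,1] so they can be compared against ecmd_threshold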
  2365. #elif defined __ARM_NEON && defined __aarch64__
// weight each channel for the luma sum
  2367. uint16x8_t red0 = vmulq_n_u16( vreinterpretq_u16_u8( ch.r.val[0] ), 14 );
  2368. uint16x8_t red1 = vmulq_n_u16( vreinterpretq_u16_u8( ch.r.val[1] ), 14 );
  2369. uint16x8_t grn0 = vmulq_n_u16( vreinterpretq_u16_u8( ch.g.val[0] ), 76 );
  2370. uint16x8_t grn1 = vmulq_n_u16( vreinterpretq_u16_u8( ch.g.val[1] ), 76 );
  2371. uint16x8_t blu0 = vmulq_n_u16( vreinterpretq_u16_u8( ch.b.val[0] ), 38 );
  2372. uint16x8_t blu1 = vmulq_n_u16( vreinterpretq_u16_u8( ch.b.val[1] ), 38 );
  2373. //calculate luma for rows 0,1 and 2,3
  2374. uint16x8_t lum_r01 = vaddq_u16( vaddq_u16( red0, grn0 ), blu0 );
  2375. uint16x8_t lum_r23 = vaddq_u16( vaddq_u16( red1, grn1 ), blu1 );
  2376. //divide luma values with right shift and narrow results to 8bit
  2377. uint8x8_t lum_r01_d = vshrn_n_u16( lum_r01, 7 );
  2378. uint8x8_t lum_r02_d = vshrn_n_u16( lum_r23, 7 );
  2379. luma.luma8 = vcombine_u8( lum_r01_d, lum_r02_d );
  2380. //find min and max luma value
  2381. luma.min = hMin( luma.luma8, luma.minIdx ) * 0.00392156f;
  2382. luma.max = hMax( luma.luma8, luma.maxIdx ) * 0.00392156f;
  2383. #else
  2384. for( int i = 0; i < 16; ++i )
  2385. {
  2386. luma.val[i] = ( src[i * 4 + 2] * 76 + src[i * 4 + 1] * 150 + src[i * 4] * 28 ) / 254; // luma calculation
  2387. if( luma.min > luma.val[i] )
  2388. {
  2389. luma.min = luma.val[i];
  2390. luma.minIdx = i;
  2391. }
  2392. if( luma.max < luma.val[i] )
  2393. {
  2394. luma.max = luma.val[i];
  2395. luma.maxIdx = i;
  2396. }
  2397. }
  2398. #endif
  2399. }
  2400. static etcpak_force_inline uint8_t SelectModeETC2( const Luma& luma )
  2401. {
  2402. #if defined __AVX2__ || defined __ARM_NEON
  2403. const float lumaRange = ( luma.max - luma.min );
  2404. #else
  2405. const float lumaRange = ( luma.max - luma.min ) * ( 1.f / 255.f );
  2406. #endif
  2407. // filters a very-low-contrast block
  2408. if( lumaRange <= ecmd_threshold[0] )
  2409. {
  2410. return ModePlanar;
  2411. }
  2412. // checks whether a pair of the corner pixels in a block has the min/max luma values;
  2413. // if so, the ETC2 planar mode is enabled, and otherwise, the ETC1 mode is enabled
  2414. else if( lumaRange <= ecmd_threshold[1] )
  2415. {
  2416. #ifdef __AVX2__
  2417. static const __m128i corner_pair = _mm_set_epi8( 1, 1, 1, 1, 1, 1, 1, 1, 0, 15, 3, 12, 12, 3, 15, 0 );
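// each of the four low 16-bit lanes of corner_pair holds one allowed (maxIdx, minIdx)
// corner pairing: (0,15), (3,12), (12,3), (15,0); the upper lanes can never match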
  2418. __m128i current_max_min = _mm_set_epi8( 0, 0, 0, 0, 0, 0, 0, 0, luma.minIdx, luma.maxIdx, luma.minIdx, luma.maxIdx, luma.minIdx, luma.maxIdx, luma.minIdx, luma.maxIdx );
  2419. __m128i max_min_result = _mm_cmpeq_epi16( corner_pair, current_max_min );
  2420. int mask = _mm_movemask_epi8( max_min_result );
  2421. if( mask )
  2422. {
  2423. return ModePlanar;
  2424. }
  2425. #else
  2426. // check whether a pair of the corner pixels in a block has the min/max luma values;
  2427. // if so, the ETC2 planar mode is enabled.
  2428. if( ( luma.minIdx == 0 && luma.maxIdx == 15 ) ||
  2429. ( luma.minIdx == 15 && luma.maxIdx == 0 ) ||
  2430. ( luma.minIdx == 3 && luma.maxIdx == 12 ) ||
  2431. ( luma.minIdx == 12 && luma.maxIdx == 3 ) )
  2432. {
  2433. return ModePlanar;
  2434. }
  2435. #endif
  2436. }
  2437. // filters a high-contrast block for checking both ETC1 mode and the ETC2 T/H mode
  2438. else if( lumaRange >= ecmd_threshold[2] )
  2439. {
  2440. return ModeTH;
  2441. }
  2442. return ModeUndecided;
  2443. }
  2444. static etcpak_force_inline uint64_t ProcessRGB_ETC2( const uint8_t* src, bool useHeuristics )
  2445. {
  2446. #ifdef __AVX2__
  2447. uint64_t d = CheckSolid_AVX2( src );
  2448. if( d != 0 ) return d;
  2449. #else
  2450. uint64_t d = CheckSolid( src );
  2451. if (d != 0) return d;
  2452. #endif
  2453. uint8_t mode = ModeUndecided;
  2454. Luma luma;
  2455. #ifdef __AVX2__
  2456. Channels ch = GetChannels( src );
  2457. if( useHeuristics )
  2458. {
  2459. CalculateLuma( ch, luma );
  2460. mode = SelectModeETC2( luma );
  2461. }
  2462. auto plane = Planar_AVX2( ch, mode, useHeuristics );
  2463. if( useHeuristics && mode == ModePlanar ) return plane.plane;
  2464. alignas( 32 ) v4i a[8];
  2465. __m128i err0 = PrepareAverages_AVX2( a, plane.sum4 );
  2466. // Get index of minimum error (err0)
  2467. __m128i err1 = _mm_shuffle_epi32( err0, _MM_SHUFFLE( 2, 3, 0, 1 ) );
  2468. __m128i errMin0 = _mm_min_epu32(err0, err1);
  2469. __m128i errMin1 = _mm_shuffle_epi32( errMin0, _MM_SHUFFLE( 1, 0, 3, 2 ) );
  2470. __m128i errMin2 = _mm_min_epu32( errMin1, errMin0 );
  2471. __m128i errMask = _mm_cmpeq_epi32( errMin2, err0 );
  2472. uint32_t mask = _mm_movemask_epi8( errMask );
  2473. size_t idx = _bit_scan_forward( mask ) >> 2;
  2474. d = EncodeAverages_AVX2( a, idx );
  2475. alignas(32) uint32_t terr[2][8] = {};
  2476. alignas(32) uint32_t tsel[8];
  2477. if ((idx == 0) || (idx == 2))
  2478. {
  2479. FindBestFit_4x2_AVX2( terr, tsel, a, idx * 2, src );
  2480. }
  2481. else
  2482. {
  2483. FindBestFit_2x4_AVX2( terr, tsel, a, idx * 2, src );
  2484. }
  2485. if( useHeuristics )
  2486. {
  2487. if( mode == ModeTH )
  2488. {
  2489. uint64_t result = 0;
  2490. uint64_t error = 0;
  2491. uint32_t compressed[4] = { 0, 0, 0, 0 };
  2492. bool tMode = false;
  2493. error = compressBlockTH( (uint8_t*)src, luma, compressed[0], compressed[1], tMode, ch.r8, ch.g8, ch.b8 );
  2494. if( tMode )
  2495. {
  2496. stuff59bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2497. }
  2498. else
  2499. {
  2500. stuff58bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2501. }
  2502. result = (uint32_t)_bswap( compressed[2] );
  2503. result |= static_cast<uint64_t>( _bswap( compressed[3] ) ) << 32;
  2504. plane.plane = result;
  2505. plane.error = error;
  2506. }
  2507. else
  2508. {
  2509. plane.plane = 0;
  2510. plane.error = MaxError;
  2511. }
  2512. }
  2513. return EncodeSelectors_AVX2( d, terr, tsel, ( idx % 2 ) == 1, plane.plane, plane.error );
  2514. #else
  2515. if( useHeuristics )
  2516. {
  2517. #if defined __ARM_NEON && defined __aarch64__
  2518. Channels ch = GetChannels( src );
  2519. CalculateLuma( ch, luma );
  2520. #else
  2521. CalculateLuma( src, luma );
  2522. #endif
  2523. mode = SelectModeETC2( luma );
  2524. }
  2525. #ifdef __ARM_NEON
  2526. auto result = Planar_NEON( src, mode, useHeuristics );
  2527. #else
  2528. auto result = Planar( src, mode, useHeuristics );
  2529. #endif
  2530. if( result.second == 0 ) return result.first;
  2531. v4i a[8];
  2532. unsigned int err[4] = {};
  2533. PrepareAverages( a, src, err );
  2534. size_t idx = GetLeastError( err, 4 );
  2535. EncodeAverages( d, a, idx );
  2536. #if ( defined __SSE4_1__ || defined __ARM_NEON ) && !defined REFERENCE_IMPLEMENTATION
  2537. uint32_t terr[2][8] = {};
  2538. #else
  2539. uint64_t terr[2][8] = {};
  2540. #endif
  2541. uint16_t tsel[16][8];
  2542. auto id = g_id[idx];
  2543. FindBestFit( terr, tsel, a, id, src );
  2544. if( useHeuristics )
  2545. {
  2546. if( mode == ModeTH )
  2547. {
  2548. uint32_t compressed[4] = { 0, 0, 0, 0 };
  2549. bool tMode = false;
  2550. result.second = compressBlockTH( (uint8_t*)src, luma, compressed[0], compressed[1], tMode );
  2551. if( tMode )
  2552. {
  2553. stuff59bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2554. }
  2555. else
  2556. {
  2557. stuff58bits( compressed[0], compressed[1], compressed[2], compressed[3] );
  2558. }
  2559. result.first = (uint32_t)_bswap( compressed[2] );
  2560. result.first |= static_cast<uint64_t>( _bswap( compressed[3] ) ) << 32;
  2561. }
  2562. else
  2563. {
  2564. result.first = 0;
  2565. result.second = MaxError;
  2566. }
  2567. }
  2568. return EncodeSelectors( d, terr, tsel, id, result.first, result.second );
  2569. #endif
  2570. }
  2571. #ifdef __SSE4_1__
  2572. template<int K>
  2573. static etcpak_force_inline __m128i Widen( const __m128i src )
  2574. {
  2575. static_assert( K >= 0 && K <= 7, "Index out of range" );
  2576. __m128i tmp;
  2577. switch( K )
  2578. {
  2579. case 0:
  2580. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2581. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2582. case 1:
  2583. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
  2584. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2585. case 2:
  2586. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2587. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2588. case 3:
  2589. tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
  2590. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2591. case 4:
  2592. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
  2593. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2594. case 5:
  2595. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
  2596. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2597. case 6:
  2598. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2599. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2600. case 7:
  2601. tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
  2602. return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
  2603. }
  2604. }
  2605. static etcpak_force_inline int GetMulSel( int sel )
  2606. {
  2607. switch( sel )
  2608. {
  2609. case 0:
  2610. return 0;
  2611. case 1:
  2612. case 2:
  2613. case 3:
  2614. return 1;
  2615. case 4:
  2616. return 2;
  2617. case 5:
  2618. case 6:
  2619. case 7:
  2620. return 3;
  2621. case 8:
  2622. case 9:
  2623. case 10:
  2624. case 11:
  2625. case 12:
  2626. case 13:
  2627. return 4;
  2628. case 14:
  2629. case 15:
  2630. return 5;
  2631. }
  2632. }
  2633. #endif
  2634. #ifdef __ARM_NEON
  2635. static constexpr etcpak_force_inline int GetMulSel(int sel)
  2636. {
  2637. return ( sel < 1 ) ? 0 : ( sel < 4 ) ? 1 : ( sel < 5 ) ? 2 : ( sel < 8 ) ? 3 : ( sel < 14 ) ? 4 : 5;
  2638. }
  2639. static constexpr int ClampConstant( int x, int min, int max )
  2640. {
  2641. return x < min ? min : x > max ? max : x;
  2642. }
  2643. template <int Index>
  2644. etcpak_force_inline static uint16x8_t ErrorProbe_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
  2645. {
  2646. uint8x8_t srcValWide;
  2647. #ifndef __aarch64__
  2648. if( Index < 8 )
  2649. srcValWide = vdup_lane_u8( vget_low_u8( alphaBlock ), ClampConstant( Index, 0, 7 ) );
  2650. else
  2651. srcValWide = vdup_lane_u8( vget_high_u8( alphaBlock ), ClampConstant( Index - 8, 0, 7 ) );
  2652. #else
  2653. srcValWide = vdup_laneq_u8( alphaBlock, Index );
  2654. #endif
  2655. uint8x8_t deltaVal = vabd_u8( srcValWide, recVal );
  2656. return vmull_u8( deltaVal, deltaVal );
  2657. }
  2658. etcpak_force_inline static uint16_t MinError_EAC_NEON( uint16x8_t errProbe )
  2659. {
  2660. #ifndef __aarch64__
  2661. uint16x4_t tmpErr = vpmin_u16( vget_low_u16( errProbe ), vget_high_u16( errProbe ) );
  2662. tmpErr = vpmin_u16( tmpErr, tmpErr );
  2663. return vpmin_u16( tmpErr, tmpErr )[0];
  2664. #else
  2665. return vminvq_u16( errProbe );
  2666. #endif
  2667. }
  2668. template <int Index>
  2669. etcpak_force_inline static uint64_t MinErrorIndex_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
  2670. {
  2671. uint16x8_t errProbe = ErrorProbe_EAC_NEON<Index>( recVal, alphaBlock );
  2672. uint16x8_t minErrMask = vceqq_u16( errProbe, vdupq_n_u16( MinError_EAC_NEON( errProbe ) ) );
  2673. uint64_t idx = __builtin_ctzll( vget_lane_u64( vreinterpret_u64_u8( vqmovn_u16( minErrMask ) ), 0 ) );
  2674. idx >>= 3;
  2675. idx <<= 45 - Index * 3;
  2676. return idx;
  2677. }
  2678. template <int Index>
  2679. etcpak_force_inline static int16x8_t WidenMultiplier_EAC_NEON( int16x8_t multipliers )
  2680. {
  2681. constexpr int Lane = GetMulSel( Index );
  2682. #ifndef __aarch64__
  2683. if( Lane < 4 )
  2684. return vdupq_lane_s16( vget_low_s16( multipliers ), ClampConstant( Lane, 0, 3 ) );
  2685. else
  2686. return vdupq_lane_s16( vget_high_s16( multipliers ), ClampConstant( Lane - 4, 0, 3 ) );
  2687. #else
  2688. return vdupq_laneq_s16( multipliers, Lane );
  2689. #endif
  2690. }
  2691. #endif
  2692. static etcpak_force_inline uint64_t ProcessAlpha_ETC2( const uint8_t* src )
  2693. {
  2694. #if defined __SSE4_1__
  2695. // Check solid
  2696. __m128i s = _mm_loadu_si128( (__m128i*)src );
  2697. __m128i solidCmp = _mm_set1_epi8( src[0] );
  2698. __m128i cmpRes = _mm_cmpeq_epi8( s, solidCmp );
  2699. if( _mm_testc_si128( cmpRes, _mm_set1_epi32( -1 ) ) )
  2700. {
  2701. return src[0];
  2702. }
  2703. // Calculate min, max
  2704. __m128i s1 = _mm_shuffle_epi32( s, _MM_SHUFFLE( 2, 3, 0, 1 ) );
  2705. __m128i max1 = _mm_max_epu8( s, s1 );
  2706. __m128i min1 = _mm_min_epu8( s, s1 );
  2707. __m128i smax2 = _mm_shuffle_epi32( max1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
  2708. __m128i smin2 = _mm_shuffle_epi32( min1, _MM_SHUFFLE( 0, 0, 2, 2 ) );
  2709. __m128i max2 = _mm_max_epu8( max1, smax2 );
  2710. __m128i min2 = _mm_min_epu8( min1, smin2 );
  2711. __m128i smax3 = _mm_alignr_epi8( max2, max2, 2 );
  2712. __m128i smin3 = _mm_alignr_epi8( min2, min2, 2 );
  2713. __m128i max3 = _mm_max_epu8( max2, smax3 );
  2714. __m128i min3 = _mm_min_epu8( min2, smin3 );
  2715. __m128i smax4 = _mm_alignr_epi8( max3, max3, 1 );
  2716. __m128i smin4 = _mm_alignr_epi8( min3, min3, 1 );
  2717. __m128i max = _mm_max_epu8( max3, smax4 );
  2718. __m128i min = _mm_min_epu8( min3, smin4 );
  2719. __m128i max16 = _mm_unpacklo_epi8( max, _mm_setzero_si128() );
  2720. __m128i min16 = _mm_unpacklo_epi8( min, _mm_setzero_si128() );
  2721. // src range, mid
  2722. __m128i srcRange = _mm_sub_epi16( max16, min16 );
  2723. __m128i srcRangeHalf = _mm_srli_epi16( srcRange, 1 );
  2724. __m128i srcMid = _mm_add_epi16( min16, srcRangeHalf );
  2725. // multiplier
  2726. __m128i mul1 = _mm_mulhi_epi16( srcRange, g_alphaRange_SIMD );
  2727. __m128i mul = _mm_add_epi16( mul1, _mm_set1_epi16( 1 ) );
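// multiplier candidates, one per modifier table: mulhi( srcRange, g_alphaRange ) + 1
// (the +1 keeps the multiplier from collapsing to zero)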
  2728. // wide source
  2729. __m128i s16_1 = _mm_shuffle_epi32( s, _MM_SHUFFLE( 3, 2, 3, 2 ) );
  2730. __m128i s16[2] = { _mm_unpacklo_epi8( s, _mm_setzero_si128() ), _mm_unpacklo_epi8( s16_1, _mm_setzero_si128() ) };
  2731. __m128i sr[16] = {
  2732. Widen<0>( s16[0] ),
  2733. Widen<1>( s16[0] ),
  2734. Widen<2>( s16[0] ),
  2735. Widen<3>( s16[0] ),
  2736. Widen<4>( s16[0] ),
  2737. Widen<5>( s16[0] ),
  2738. Widen<6>( s16[0] ),
  2739. Widen<7>( s16[0] ),
  2740. Widen<0>( s16[1] ),
  2741. Widen<1>( s16[1] ),
  2742. Widen<2>( s16[1] ),
  2743. Widen<3>( s16[1] ),
  2744. Widen<4>( s16[1] ),
  2745. Widen<5>( s16[1] ),
  2746. Widen<6>( s16[1] ),
  2747. Widen<7>( s16[1] )
  2748. };
  2749. #ifdef __AVX2__
  2750. __m256i srcRangeWide = _mm256_broadcastsi128_si256( srcRange );
  2751. __m256i srcMidWide = _mm256_broadcastsi128_si256( srcMid );
  2752. __m256i mulWide1 = _mm256_mulhi_epi16( srcRangeWide, g_alphaRange_AVX );
  2753. __m256i mulWide = _mm256_add_epi16( mulWide1, _mm256_set1_epi16( 1 ) );
  2754. __m256i modMul[8] = {
  2755. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[0] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[0] ) ) ), _mm256_setzero_si256() ),
  2756. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[1] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[1] ) ) ), _mm256_setzero_si256() ),
  2757. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[2] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[2] ) ) ), _mm256_setzero_si256() ),
  2758. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[3] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[3] ) ) ), _mm256_setzero_si256() ),
  2759. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[4] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[4] ) ) ), _mm256_setzero_si256() ),
  2760. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[5] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[5] ) ) ), _mm256_setzero_si256() ),
  2761. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[6] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[6] ) ) ), _mm256_setzero_si256() ),
  2762. _mm256_unpacklo_epi8( _mm256_packus_epi16( _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[7] ) ), _mm256_add_epi16( srcMidWide, _mm256_mullo_epi16( mulWide, g_alpha_AVX[7] ) ) ), _mm256_setzero_si256() ),
  2763. };
  2764. // find selector
  2765. __m256i mulErr = _mm256_setzero_si256();
  2766. for( int j=0; j<16; j++ )
  2767. {
  2768. __m256i s16Wide = _mm256_broadcastsi128_si256( sr[j] );
  2769. __m256i err1, err2;
  2770. err1 = _mm256_sub_epi16( s16Wide, modMul[0] );
  2771. __m256i localErr = _mm256_mullo_epi16( err1, err1 );
  2772. err1 = _mm256_sub_epi16( s16Wide, modMul[1] );
  2773. err2 = _mm256_mullo_epi16( err1, err1 );
  2774. localErr = _mm256_min_epu16( localErr, err2 );
  2775. err1 = _mm256_sub_epi16( s16Wide, modMul[2] );
  2776. err2 = _mm256_mullo_epi16( err1, err1 );
  2777. localErr = _mm256_min_epu16( localErr, err2 );
  2778. err1 = _mm256_sub_epi16( s16Wide, modMul[3] );
  2779. err2 = _mm256_mullo_epi16( err1, err1 );
  2780. localErr = _mm256_min_epu16( localErr, err2 );
  2781. err1 = _mm256_sub_epi16( s16Wide, modMul[4] );
  2782. err2 = _mm256_mullo_epi16( err1, err1 );
  2783. localErr = _mm256_min_epu16( localErr, err2 );
  2784. err1 = _mm256_sub_epi16( s16Wide, modMul[5] );
  2785. err2 = _mm256_mullo_epi16( err1, err1 );
  2786. localErr = _mm256_min_epu16( localErr, err2 );
  2787. err1 = _mm256_sub_epi16( s16Wide, modMul[6] );
  2788. err2 = _mm256_mullo_epi16( err1, err1 );
  2789. localErr = _mm256_min_epu16( localErr, err2 );
  2790. err1 = _mm256_sub_epi16( s16Wide, modMul[7] );
  2791. err2 = _mm256_mullo_epi16( err1, err1 );
  2792. localErr = _mm256_min_epu16( localErr, err2 );
// saturating add: the accumulated error may clamp at 0xFFFF, but since we only look for the smallest error this doesn't matter
  2794. mulErr = _mm256_adds_epu16( mulErr, localErr );
  2795. }
  2796. uint64_t minPos1 = _mm_cvtsi128_si64( _mm_minpos_epu16( _mm256_castsi256_si128( mulErr ) ) );
  2797. uint64_t minPos2 = _mm_cvtsi128_si64( _mm_minpos_epu16( _mm256_extracti128_si256( mulErr, 1 ) ) );
  2798. int sel = ( ( minPos1 & 0xFFFF ) < ( minPos2 & 0xFFFF ) ) ? ( minPos1 >> 16 ) : ( 8 + ( minPos2 >> 16 ) );
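// sel is the (multiplier, modifier table) candidate with the smallest accumulated error;
// candidates 0..7 come from the low 128-bit half of mulErr, 8..15 from the high half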
  2799. __m128i recVal16;
  2800. switch( sel )
  2801. {
  2802. case 0:
  2803. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ) ), _mm_setzero_si128() );
  2804. break;
  2805. case 1:
  2806. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ) ), _mm_setzero_si128() );
  2807. break;
  2808. case 2:
  2809. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ) ), _mm_setzero_si128() );
  2810. break;
  2811. case 3:
  2812. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ) ), _mm_setzero_si128() );
  2813. break;
  2814. case 4:
  2815. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ) ), _mm_setzero_si128() );
  2816. break;
  2817. case 5:
  2818. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ) ), _mm_setzero_si128() );
  2819. break;
  2820. case 6:
  2821. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ) ), _mm_setzero_si128() );
  2822. break;
  2823. case 7:
  2824. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ) ), _mm_setzero_si128() );
  2825. break;
  2826. case 8:
  2827. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ) ), _mm_setzero_si128() );
  2828. break;
  2829. case 9:
  2830. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ) ), _mm_setzero_si128() );
  2831. break;
  2832. case 10:
  2833. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ) ), _mm_setzero_si128() );
  2834. break;
  2835. case 11:
  2836. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ) ), _mm_setzero_si128() );
  2837. break;
  2838. case 12:
  2839. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ) ), _mm_setzero_si128() );
  2840. break;
  2841. case 13:
  2842. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ) ), _mm_setzero_si128() );
  2843. break;
  2844. case 14:
  2845. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ) ), _mm_setzero_si128() );
  2846. break;
  2847. case 15:
  2848. recVal16 = _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ) ), _mm_setzero_si128() );
  2849. break;
  2850. default:
  2851. assert( false );
  2852. break;
  2853. }
  2854. #else
  2855. // wide multiplier
  2856. __m128i rangeMul[16] = {
  2857. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<0>( mul ), g_alpha_SIMD[0] ) ) ), _mm_setzero_si128() ),
  2858. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[1] ) ) ), _mm_setzero_si128() ),
  2859. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[2] ) ) ), _mm_setzero_si128() ),
  2860. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<1>( mul ), g_alpha_SIMD[3] ) ) ), _mm_setzero_si128() ),
  2861. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<2>( mul ), g_alpha_SIMD[4] ) ) ), _mm_setzero_si128() ),
  2862. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[5] ) ) ), _mm_setzero_si128() ),
  2863. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[6] ) ) ), _mm_setzero_si128() ),
  2864. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<3>( mul ), g_alpha_SIMD[7] ) ) ), _mm_setzero_si128() ),
  2865. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[8] ) ) ), _mm_setzero_si128() ),
  2866. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[9] ) ) ), _mm_setzero_si128() ),
  2867. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[10] ) ) ), _mm_setzero_si128() ),
  2868. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[11] ) ) ), _mm_setzero_si128() ),
  2869. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[12] ) ) ), _mm_setzero_si128() ),
  2870. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<4>( mul ), g_alpha_SIMD[13] ) ) ), _mm_setzero_si128() ),
  2871. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[14] ) ) ), _mm_setzero_si128() ),
  2872. _mm_unpacklo_epi8( _mm_packus_epi16( _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ), _mm_add_epi16( srcMid, _mm_mullo_epi16( Widen<5>( mul ), g_alpha_SIMD[15] ) ) ), _mm_setzero_si128() )
  2873. };
  2874. // find selector
  2875. int err = std::numeric_limits<int>::max();
  2876. int sel;
  2877. for( int r=0; r<16; r++ )
  2878. {
  2879. __m128i err1, err2, minerr;
  2880. __m128i recVal16 = rangeMul[r];
  2881. int rangeErr;
  2882. err1 = _mm_sub_epi16( sr[0], recVal16 );
  2883. err2 = _mm_mullo_epi16( err1, err1 );
  2884. minerr = _mm_minpos_epu16( err2 );
  2885. rangeErr = _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2886. err1 = _mm_sub_epi16( sr[1], recVal16 );
  2887. err2 = _mm_mullo_epi16( err1, err1 );
  2888. minerr = _mm_minpos_epu16( err2 );
  2889. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2890. err1 = _mm_sub_epi16( sr[2], recVal16 );
  2891. err2 = _mm_mullo_epi16( err1, err1 );
  2892. minerr = _mm_minpos_epu16( err2 );
  2893. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2894. err1 = _mm_sub_epi16( sr[3], recVal16 );
  2895. err2 = _mm_mullo_epi16( err1, err1 );
  2896. minerr = _mm_minpos_epu16( err2 );
  2897. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2898. err1 = _mm_sub_epi16( sr[4], recVal16 );
  2899. err2 = _mm_mullo_epi16( err1, err1 );
  2900. minerr = _mm_minpos_epu16( err2 );
  2901. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2902. err1 = _mm_sub_epi16( sr[5], recVal16 );
  2903. err2 = _mm_mullo_epi16( err1, err1 );
  2904. minerr = _mm_minpos_epu16( err2 );
  2905. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2906. err1 = _mm_sub_epi16( sr[6], recVal16 );
  2907. err2 = _mm_mullo_epi16( err1, err1 );
  2908. minerr = _mm_minpos_epu16( err2 );
  2909. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2910. err1 = _mm_sub_epi16( sr[7], recVal16 );
  2911. err2 = _mm_mullo_epi16( err1, err1 );
  2912. minerr = _mm_minpos_epu16( err2 );
  2913. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2914. err1 = _mm_sub_epi16( sr[8], recVal16 );
  2915. err2 = _mm_mullo_epi16( err1, err1 );
  2916. minerr = _mm_minpos_epu16( err2 );
  2917. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2918. err1 = _mm_sub_epi16( sr[9], recVal16 );
  2919. err2 = _mm_mullo_epi16( err1, err1 );
  2920. minerr = _mm_minpos_epu16( err2 );
  2921. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2922. err1 = _mm_sub_epi16( sr[10], recVal16 );
  2923. err2 = _mm_mullo_epi16( err1, err1 );
  2924. minerr = _mm_minpos_epu16( err2 );
  2925. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2926. err1 = _mm_sub_epi16( sr[11], recVal16 );
  2927. err2 = _mm_mullo_epi16( err1, err1 );
  2928. minerr = _mm_minpos_epu16( err2 );
  2929. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2930. err1 = _mm_sub_epi16( sr[12], recVal16 );
  2931. err2 = _mm_mullo_epi16( err1, err1 );
  2932. minerr = _mm_minpos_epu16( err2 );
  2933. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2934. err1 = _mm_sub_epi16( sr[13], recVal16 );
  2935. err2 = _mm_mullo_epi16( err1, err1 );
  2936. minerr = _mm_minpos_epu16( err2 );
  2937. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2938. err1 = _mm_sub_epi16( sr[14], recVal16 );
  2939. err2 = _mm_mullo_epi16( err1, err1 );
  2940. minerr = _mm_minpos_epu16( err2 );
  2941. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2942. err1 = _mm_sub_epi16( sr[15], recVal16 );
  2943. err2 = _mm_mullo_epi16( err1, err1 );
  2944. minerr = _mm_minpos_epu16( err2 );
  2945. rangeErr += _mm_cvtsi128_si64( minerr ) & 0xFFFF;
  2946. if( rangeErr < err )
  2947. {
  2948. err = rangeErr;
  2949. sel = r;
  2950. if( err == 0 ) break;
  2951. }
  2952. }
  2953. __m128i recVal16 = rangeMul[sel];
  2954. #endif
    // find indices
    __m128i err1, err2, minerr;
    uint64_t idx = 0, tmp;
    err1 = _mm_sub_epi16( sr[0], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 15*3;
    err1 = _mm_sub_epi16( sr[1], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 14*3;
    err1 = _mm_sub_epi16( sr[2], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 13*3;
    err1 = _mm_sub_epi16( sr[3], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 12*3;
    err1 = _mm_sub_epi16( sr[4], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 11*3;
    err1 = _mm_sub_epi16( sr[5], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 10*3;
    err1 = _mm_sub_epi16( sr[6], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 9*3;
    err1 = _mm_sub_epi16( sr[7], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 8*3;
    err1 = _mm_sub_epi16( sr[8], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 7*3;
    err1 = _mm_sub_epi16( sr[9], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 6*3;
    err1 = _mm_sub_epi16( sr[10], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 5*3;
    err1 = _mm_sub_epi16( sr[11], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 4*3;
    err1 = _mm_sub_epi16( sr[12], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 3*3;
    err1 = _mm_sub_epi16( sr[13], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 2*3;
    err1 = _mm_sub_epi16( sr[14], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 1*3;
    err1 = _mm_sub_epi16( sr[15], recVal16 );
    err2 = _mm_mullo_epi16( err1, err1 );
    minerr = _mm_minpos_epu16( err2 );
    tmp = _mm_cvtsi128_si64( minerr );
    idx |= ( tmp >> 16 ) << 0*3;
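
    // Assemble the EAC alpha block in a host-endian uint64_t: the base value
    // (srcMid) goes to bits 63..56, the selected multiplier to bits 55..52, the
    // modifier-table index to bits 51..48, and the 16 pixel selectors fill the low
    // 48 bits. _bswap64 then produces the big-endian byte order of the stored block.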
    uint16_t rm[8];
    _mm_storeu_si128( (__m128i*)rm, mul );
    uint16_t sm = _mm_cvtsi128_si64( srcMid );

    uint64_t d = ( uint64_t( sm ) << 56 ) |
        ( uint64_t( rm[GetMulSel( sel )] ) << 52 ) |
        ( uint64_t( sel ) << 48 ) |
        idx;

    return _bswap64( d );
#elif defined __ARM_NEON
    int16x8_t srcMidWide, multipliers;
    int srcMid;
    uint8x16_t srcAlphaBlock = vld1q_u8( src );
    {
        uint8_t ref = src[0];
        uint8x16_t a0 = vdupq_n_u8( ref );
        uint8x16_t r = vceqq_u8( srcAlphaBlock, a0 );
        int64x2_t m = vreinterpretq_s64_u8( r );
        if( m[0] == -1 && m[1] == -1 )
            return ref;

        // srcRange
#ifdef __aarch64__
        uint8_t min = vminvq_u8( srcAlphaBlock );
        uint8_t max = vmaxvq_u8( srcAlphaBlock );
        uint8_t srcRange = max - min;
        multipliers = vqaddq_s16( vshrq_n_s16( vqdmulhq_n_s16( g_alphaRange_NEON, srcRange ), 1 ), vdupq_n_s16( 1 ) );
        srcMid = min + srcRange / 2;
        srcMidWide = vdupq_n_s16( srcMid );
#else
        uint8x8_t vmin = vpmin_u8( vget_low_u8( srcAlphaBlock ), vget_high_u8( srcAlphaBlock ) );
        vmin = vpmin_u8( vmin, vmin );
        vmin = vpmin_u8( vmin, vmin );
        vmin = vpmin_u8( vmin, vmin );
        uint8x8_t vmax = vpmax_u8( vget_low_u8( srcAlphaBlock ), vget_high_u8( srcAlphaBlock ) );
        vmax = vpmax_u8( vmax, vmax );
        vmax = vpmax_u8( vmax, vmax );
        vmax = vpmax_u8( vmax, vmax );
        int16x8_t srcRangeWide = vreinterpretq_s16_u16( vsubl_u8( vmax, vmin ) );
        multipliers = vqaddq_s16( vshrq_n_s16( vqdmulhq_s16( g_alphaRange_NEON, srcRangeWide ), 1 ), vdupq_n_s16( 1 ) );
        srcMidWide = vsraq_n_s16( vreinterpretq_s16_u16( vmovl_u8( vmin ) ), srcRangeWide, 1 );
        srcMid = vgetq_lane_s16( srcMidWide, 0 );
#endif
    }
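
    // Reconstructed values: for each of the 16 EAC modifier tables, vmlaq_s16
    // computes mid + modifier * multiplier for all 8 selectors and vqmovun_s16
    // saturates the result to unsigned 8 bits, so recVals[r] holds the 8 possible
    // decoded alpha values for table r.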
    // calculate reconstructed values
#define EAC_APPLY_16X( m ) m( 0 ) m( 1 ) m( 2 ) m( 3 ) m( 4 ) m( 5 ) m( 6 ) m( 7 ) m( 8 ) m( 9 ) m( 10 ) m( 11 ) m( 12 ) m( 13 ) m( 14 ) m( 15 )
#define EAC_RECONSTRUCT_VALUE( n ) vqmovun_s16( vmlaq_s16( srcMidWide, g_alpha_NEON[n], WidenMultiplier_EAC_NEON<n>( multipliers ) ) ),
    uint8x8_t recVals[16] = { EAC_APPLY_16X( EAC_RECONSTRUCT_VALUE ) };

    // find selector
    int err = std::numeric_limits<int>::max();
    int sel = 0;
    for( int r = 0; r < 16; r++ )
    {
        uint8x8_t recVal = recVals[r];

        int rangeErr = 0;
#define EAC_ACCUMULATE_ERROR( n ) rangeErr += MinError_EAC_NEON( ErrorProbe_EAC_NEON<n>( recVal, srcAlphaBlock ) );
        EAC_APPLY_16X( EAC_ACCUMULATE_ERROR )

        if( rangeErr < err )
        {
            err = rangeErr;
            sel = r;
            if( err == 0 ) break;
        }
    }

    // combine results
    uint64_t d = ( uint64_t( srcMid ) << 56 ) |
        ( uint64_t( multipliers[GetMulSel( sel )] ) << 52 ) |
        ( uint64_t( sel ) << 48 );

    // generate indices
    uint8x8_t recVal = recVals[sel];
#define EAC_INSERT_INDEX(n) d |= MinErrorIndex_EAC_NEON<n>( recVal, srcAlphaBlock );
    EAC_APPLY_16X( EAC_INSERT_INDEX )

    return _bswap64( d );

#undef EAC_APPLY_16X
#undef EAC_INSERT_INDEX
#undef EAC_ACCUMULATE_ERROR
#undef EAC_RECONSTRUCT_VALUE
#else
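
    // Scalar fallback: the same brute-force search as the SIMD paths above. Try
    // all 16 modifier tables and, for every pixel, pick the selector (0..7) whose
    // reconstructed value minimizes the squared error.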
    {
        bool solid = true;
        const uint8_t* ptr = src + 1;
        const uint8_t ref = *src;
        for( int i=1; i<16; i++ )
        {
            if( ref != *ptr++ )
            {
                solid = false;
                break;
            }
        }
        if( solid )
        {
            return ref;
        }
    }

    uint8_t min = src[0];
    uint8_t max = src[0];
    for( int i=1; i<16; i++ )
    {
        if( min > src[i] ) min = src[i];
        else if( max < src[i] ) max = src[i];
    }
    int srcRange = max - min;
    int srcMid = min + srcRange / 2;

    uint8_t buf[16][16];
    int err = std::numeric_limits<int>::max();
    int sel;
    int selmul;
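
    // For each table r a multiplier is derived from the block's value range as
    // ( ( srcRange * g_alphaRange[r] ) >> 16 ) + 1; g_alphaRange presumably holds
    // fixed-point weights with 16 fractional bits. buf[r][i] records the best
    // selector for pixel i under table r, so the winning row can be emitted
    // directly once the search finishes.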
    for( int r=0; r<16; r++ )
    {
        int mul = ( ( srcRange * g_alphaRange[r] ) >> 16 ) + 1;
        int rangeErr = 0;
        for( int i=0; i<16; i++ )
        {
            const auto srcVal = src[i];

            int idx = 0;
            const auto modVal = g_alpha[r][0] * mul;
            const auto recVal = clampu8( srcMid + modVal );
            int localErr = sq( srcVal - recVal );

            if( localErr != 0 )
            {
                for( int j=1; j<8; j++ )
                {
                    const auto modVal = g_alpha[r][j] * mul;
                    const auto recVal = clampu8( srcMid + modVal );
                    const auto errProbe = sq( srcVal - recVal );
                    if( errProbe < localErr )
                    {
                        localErr = errProbe;
                        idx = j;
                    }
                }
            }

            buf[r][i] = idx;
            rangeErr += localErr;
        }

        if( rangeErr < err )
        {
            err = rangeErr;
            sel = r;
            selmul = mul;
            if( err == 0 ) break;
        }
    }
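
    // Pack the block: base value, multiplier and table index occupy the top
    // 16 bits, then the 16 selectors are written as 3-bit fields starting at
    // bit 45 and stepping down by 3, matching the MSB-first layout of the SIMD
    // paths above.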
    uint64_t d = ( uint64_t( srcMid ) << 56 ) |
        ( uint64_t( selmul ) << 52 ) |
        ( uint64_t( sel ) << 48 );

    int offset = 45;
    auto ptr = buf[sel];
    for( int i=0; i<16; i++ )
    {
        d |= uint64_t( *ptr++ ) << offset;
        offset -= 3;
    }

    return _bswap64( d );
#endif
}
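
// The compressors below all walk the image the same way: blocks are processed
// left to right within a row of 4x4 blocks, src advances by 4 pixels per block,
// and once a block row is complete ( ++w == width/4 ) src skips the remaining
// three pixel rows. On the SSE4.1 path four rows are loaded and transposed with
// _MM_TRANSPOSE4_PS so each column of the block becomes contiguous in the staging
// buffer, which presumably matches the column-major pixel order the ProcessRGB*
// encoders consume. For the "alpha as ETC1/ETC2" variants the alpha byte (bits
// 31..24 of each RGBA pixel) is broadcast across the color channels, so the color
// encoder effectively compresses a grayscale alpha image.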
void CompressEtc1Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
        __m128i c0 = _mm_castps_si128( px0 );
        __m128i c1 = _mm_castps_si128( px1 );
        __m128i c2 = _mm_castps_si128( px2 );
        __m128i c3 = _mm_castps_si128( px3 );
        __m128i mask = _mm_setr_epi32( 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f );
        __m128i p0 = _mm_shuffle_epi8( c0, mask );
        __m128i p1 = _mm_shuffle_epi8( c1, mask );
        __m128i p2 = _mm_shuffle_epi8( c2, mask );
        __m128i p3 = _mm_shuffle_epi8( c3, mask );
        _mm_store_si128( (__m128i*)(buf + 0), p0 );
        _mm_store_si128( (__m128i*)(buf + 4), p1 );
        _mm_store_si128( (__m128i*)(buf + 8), p2 );
        _mm_store_si128( (__m128i*)(buf + 12), p3 );
        src += 4;
#else
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            unsigned int a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src += width;
            a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src += width;
            a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src += width;
            a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }
        *dst++ = ProcessRGB( (uint8_t*)buf );
    }
    while( --blocks );
}
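
// Identical traversal to CompressEtc1Alpha; the only difference is that the
// grayscale alpha block is handed to the ETC2 encoder, ProcessRGB_ETC2, with the
// useHeuristics flag forwarded to it.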
void CompressEtc2Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
        __m128i c0 = _mm_castps_si128( px0 );
        __m128i c1 = _mm_castps_si128( px1 );
        __m128i c2 = _mm_castps_si128( px2 );
        __m128i c3 = _mm_castps_si128( px3 );
        __m128i mask = _mm_setr_epi32( 0x03030303, 0x07070707, 0x0b0b0b0b, 0x0f0f0f0f );
        __m128i p0 = _mm_shuffle_epi8( c0, mask );
        __m128i p1 = _mm_shuffle_epi8( c1, mask );
        __m128i p2 = _mm_shuffle_epi8( c2, mask );
        __m128i p3 = _mm_shuffle_epi8( c3, mask );
        _mm_store_si128( (__m128i*)(buf + 0), p0 );
        _mm_store_si128( (__m128i*)(buf + 4), p1 );
        _mm_store_si128( (__m128i*)(buf + 8), p2 );
        _mm_store_si128( (__m128i*)(buf + 12), p3 );
        src += 4;
#else
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            unsigned int a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src += width;
            a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src += width;
            a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src += width;
            a = *src >> 24;
            *ptr++ = a | ( a << 8 ) | ( a << 16 );
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }
        *dst++ = ProcessRGB_ETC2( (uint8_t*)buf, useHeuristics );
    }
    while( --blocks );
}

#include <chrono>
#include <thread>
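
// Full-color ETC1 compression: each 4x4 block is gathered (transposed into
// column-major order on the SSE4.1 path, copied column by column otherwise) and
// passed to ProcessRGB. A typical call for an RGBA8 image whose dimensions are
// multiples of 4 might look like this (hypothetical caller, not part of this file):
//
//     std::vector<uint64_t> out( ( width / 4 ) * ( height / 4 ) );
//     CompressEtc1Rgb( pixels, out.data(), uint32_t( out.size() ), width );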
void CompressEtc1Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
        _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
        _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
        _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
        _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );
        src += 4;
#else
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }
        *dst++ = ProcessRGB( (uint8_t*)buf );
    }
    while( --blocks );
}
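
// Same traversal as CompressEtc1Rgb, but the block is dithered before encoding:
// with AVX2 the transposed registers are passed straight to DitherAvx2, otherwise
// they are stored to buf and the scalar Dither routine is applied in place.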
void CompressEtc1RgbDither( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
#  ifdef __AVX2__
        DitherAvx2( (uint8_t*)buf, _mm_castps_si128( px0 ), _mm_castps_si128( px1 ), _mm_castps_si128( px2 ), _mm_castps_si128( px3 ) );
#  else
        _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
        _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
        _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
        _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );
        Dither( (uint8_t*)buf );
#  endif
        src += 4;
#else
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }
        *dst++ = ProcessRGB( (uint8_t*)buf );
    }
    while( --blocks );
}
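
// ETC2 color compression: identical traversal to CompressEtc1Rgb, but each block
// is encoded with ProcessRGB_ETC2 (which can also select ETC2-specific modes such
// as planar), with useHeuristics forwarded to the encoder.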
void CompressEtc2Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{
    int w = 0;
    uint32_t buf[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
        _mm_store_si128( (__m128i*)(buf + 0), _mm_castps_si128( px0 ) );
        _mm_store_si128( (__m128i*)(buf + 4), _mm_castps_si128( px1 ) );
        _mm_store_si128( (__m128i*)(buf + 8), _mm_castps_si128( px2 ) );
        _mm_store_si128( (__m128i*)(buf + 12), _mm_castps_si128( px3 ) );
        src += 4;
#else
        auto ptr = buf;
        for( int x=0; x<4; x++ )
        {
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src += width;
            *ptr++ = *src;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }
        *dst++ = ProcessRGB_ETC2( (uint8_t*)buf, useHeuristics );
    }
    while( --blocks );
}
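
// RGBA compression emits two 64-bit words per block: the EAC alpha block first,
// then the ETC2 color block. On the SSE4.1 path the alpha bytes are gathered with
// _mm_shuffle_epi8: the mask dword 0x0f0b0703 picks bytes 3, 7, 11 and 15 (the
// alpha of four pixels) into one 32-bit lane, lanes whose indices are -1 shuffle
// to zero, and OR-ing the four results yields all 16 alpha values in pixel order
// for ProcessAlpha_ETC2.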
void CompressEtc2Rgba( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{
    int w = 0;
    uint32_t rgba[4*4];
    uint8_t alpha[4*4];
    do
    {
#ifdef __SSE4_1__
        __m128 px0 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 0 ) ) );
        __m128 px1 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 1 ) ) );
        __m128 px2 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 2 ) ) );
        __m128 px3 = _mm_castsi128_ps( _mm_loadu_si128( (__m128i*)( src + width * 3 ) ) );
        _MM_TRANSPOSE4_PS( px0, px1, px2, px3 );
        __m128i c0 = _mm_castps_si128( px0 );
        __m128i c1 = _mm_castps_si128( px1 );
        __m128i c2 = _mm_castps_si128( px2 );
        __m128i c3 = _mm_castps_si128( px3 );
        _mm_store_si128( (__m128i*)(rgba + 0), c0 );
        _mm_store_si128( (__m128i*)(rgba + 4), c1 );
        _mm_store_si128( (__m128i*)(rgba + 8), c2 );
        _mm_store_si128( (__m128i*)(rgba + 12), c3 );
        __m128i mask = _mm_setr_epi32( 0x0f0b0703, -1, -1, -1 );
        __m128i a0 = _mm_shuffle_epi8( c0, mask );
        __m128i a1 = _mm_shuffle_epi8( c1, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 3, 0, 3 ) ) );
        __m128i a2 = _mm_shuffle_epi8( c2, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 3, 0, 3, 3 ) ) );
        __m128i a3 = _mm_shuffle_epi8( c3, _mm_shuffle_epi32( mask, _MM_SHUFFLE( 0, 3, 3, 3 ) ) );
        __m128i s0 = _mm_or_si128( a0, a1 );
        __m128i s1 = _mm_or_si128( a2, a3 );
        __m128i s2 = _mm_or_si128( s0, s1 );
        _mm_store_si128( (__m128i*)alpha, s2 );
        src += 4;
#else
        auto ptr = rgba;
        auto ptr8 = alpha;
        for( int x=0; x<4; x++ )
        {
            auto v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src += width;
            v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src += width;
            v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src += width;
            v = *src;
            *ptr++ = v;
            *ptr8++ = v >> 24;
            src -= width * 3 - 1;
        }
#endif
        if( ++w == width/4 )
        {
            src += width * 3;
            w = 0;
        }
        *dst++ = ProcessAlpha_ETC2( alpha );
        *dst++ = ProcessRGB_ETC2( (uint8_t*)rgba, useHeuristics );
    }
    while( --blocks );
}