/* -----------------------------------------------------------------------------

    Copyright (c) 2006 Simon Brown                          si@sjbrown.co.uk

    Permission is hereby granted, free of charge, to any person obtaining
    a copy of this software and associated documentation files (the
    "Software"), to deal in the Software without restriction, including
    without limitation the rights to use, copy, modify, merge, publish,
    distribute, sublicense, and/or sell copies of the Software, and to
    permit persons to whom the Software is furnished to do so, subject to
    the following conditions:

    The above copyright notice and this permission notice shall be included
    in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

   -------------------------------------------------------------------------- */
#include <string.h>
#include <algorithm>    // std::min, used by ComputeMSE below

#include "squish.h"
#include "colourset.h"
#include "maths.h"
#include "rangefit.h"
#include "clusterfit.h"
#include "colourblock.h"
#include "alpha.h"
#include "singlecolourfit.h"

namespace squish {
static int FixFlags( int flags )
{
    // grab the flag bits
    int method = flags & ( kDxt1 | kDxt3 | kDxt5 | kBc4 | kBc5 );
    int fit = flags & ( kColourIterativeClusterFit | kColourClusterFit | kColourRangeFit );
    // keep kSourceBGRA as well, otherwise CopyRGBA never sees it after FixFlags is applied
    int extra = flags & ( kWeightColourByAlpha | kToLinear | kSourceBGRA );

    // set defaults
    if( method != kDxt3
        && method != kDxt5
        && method != kBc4
        && method != kBc5 )
    {
        method = kDxt1;
    }

    if( fit != kColourRangeFit && fit != kColourIterativeClusterFit )
        fit = kColourClusterFit;

    // done
    return method | fit | extra;
}
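
// Example: FixFlags( 0 ) yields kDxt1 | kColourClusterFit -- the defaults chosen
// above when a caller passes no explicit method or fit flag.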

void CompressMasked( u8 const* rgba, int mask, void* block, int flags, float* metric )
{
    // fix any bad flags
    flags = FixFlags( flags );

    // BC4 stores one channel and BC5 stores two, each encoded exactly like a DXT5
    // alpha block, so route R (and G for BC5) through the alpha slot of a scratch
    // buffer and reuse CompressAlphaDxt5
    if( ( flags & ( kBc4 | kBc5 ) ) != 0 )
    {
        u8 alpha[16*4];
        for( int i = 0; i < 16; ++i )
        {
            alpha[i*4 + 3] = rgba[i*4 + 0]; // copy R to A
        }
        u8* rBlock = reinterpret_cast< u8* >( block );
        CompressAlphaDxt5( alpha, mask, rBlock );

        if( ( flags & kBc5 ) != 0 )
        {
            for( int i = 0; i < 16; ++i )
            {
                alpha[i*4 + 3] = rgba[i*4 + 1]; // copy G to A
            }
            u8* gBlock = reinterpret_cast< u8* >( block ) + 8;
            CompressAlphaDxt5( alpha, mask, gBlock );
        }
        return;
    }

    // get the block locations
    void* colourBlock = block;
    void* alphaBlock = block;
    if( ( flags & ( kDxt3 | kDxt5 ) ) != 0 )
        colourBlock = reinterpret_cast< u8* >( block ) + 8;

    // create the minimal point set
    ColourSet colours( rgba, mask, flags );

    // check the compression type and compress colour
    if( colours.GetCount() == 1 )
    {
        // always do a single colour fit
        SingleColourFit fit( &colours, flags );
        fit.Compress( colourBlock );
    }
    else if( ( flags & kColourRangeFit ) != 0 || colours.GetCount() == 0 )
    {
        // do a range fit
        RangeFit fit( &colours, flags, metric );
        fit.Compress( colourBlock );
    }
    else
    {
        // default to a cluster fit (could be iterative or not)
        ClusterFit fit( &colours, flags, metric );
        fit.Compress( colourBlock );
    }

    // compress alpha separately if necessary
    if( ( flags & kDxt3 ) != 0 )
        CompressAlphaDxt3( rgba, mask, alphaBlock );
    else if( ( flags & kDxt5 ) != 0 )
        CompressAlphaDxt5( rgba, mask, alphaBlock );
}
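
/* Usage sketch (illustrative, caller-side names): compress a single block where
   only the top-left 2x2 pixels are valid, using the same 4*y + x mask layout
   that CompressImage builds below.

       squish::u8 pixels[16*4];      // 16 RGBA pixels, row-major
       squish::u8 block[16];         // large enough for any block type
       int mask = ( 1 << 0 ) | ( 1 << 1 ) | ( 1 << 4 ) | ( 1 << 5 );
       squish::CompressMasked( pixels, mask, block, squish::kDxt5, NULL );
*/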

void Decompress( u8* rgba, void const* block, int flags )
{
    // fix any bad flags
    flags = FixFlags( flags );

    // get the block locations
    void const* colourBlock = block;
    void const* alphaBlock = block;
    if( ( flags & ( kDxt3 | kDxt5 ) ) != 0 )
        colourBlock = reinterpret_cast< u8 const* >( block ) + 8;

    // decompress colour
    DecompressColour( rgba, colourBlock, ( flags & kDxt1 ) != 0 );

    // decompress alpha separately if necessary
    if( ( flags & kDxt3 ) != 0 )
        DecompressAlphaDxt3( rgba, alphaBlock );
    else if( ( flags & kDxt5 ) != 0 )
        DecompressAlphaDxt5( rgba, alphaBlock );
}

int GetStorageRequirements( int width, int height, int flags )
{
    // fix any bad flags
    flags = FixFlags( flags );

    // compute the storage requirements
    int blockcount = ( ( width + 3 )/4 ) * ( ( height + 3 )/4 );
    int blocksize = ( ( flags & ( kDxt1 | kBc4 ) ) != 0 ) ? 8 : 16;
    return blockcount*blocksize;
}
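
// Worked example: a 256x260 image compressed with kDxt1 needs
// ((256+3)/4) * ((260+3)/4) = 64 * 65 = 4160 blocks of 8 bytes each,
// i.e. 33280 bytes; the same image as kDxt5 would use 16-byte blocks.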

void CopyRGBA( u8 const* source, u8* dest, int flags )
{
    if( flags & kSourceBGRA )
    {
        // convert from bgra to rgba
        dest[0] = source[2];
        dest[1] = source[1];
        dest[2] = source[0];
        dest[3] = source[3];
    }
    else
    {
        for( int i = 0; i < 4; ++i )
            *dest++ = *source++;
    }
}

void CompressImage( u8 const* rgba, int width, int height, int pitch, void* blocks, int flags, float* metric )
{
    // fix any bad flags
    flags = FixFlags( flags );

    // loop over blocks
#ifdef SQUISH_USE_OPENMP
#   pragma omp parallel for
#endif
    for( int y = 0; y < height; y += 4 )
    {
        // initialise the block output
        u8* targetBlock = reinterpret_cast< u8* >( blocks );
        int bytesPerBlock = ( ( flags & ( kDxt1 | kBc4 ) ) != 0 ) ? 8 : 16;
        targetBlock += ( ( y/4 )*( ( width + 3 )/4 ) )*bytesPerBlock;

        for( int x = 0; x < width; x += 4 )
        {
            // build the 4x4 block of pixels
            u8 sourceRgba[16*4];
            u8* targetPixel = sourceRgba;
            int mask = 0;
            for( int py = 0; py < 4; ++py )
            {
                for( int px = 0; px < 4; ++px )
                {
                    // get the source pixel in the image
                    int sx = x + px;
                    int sy = y + py;

                    // enable if we're in the image
                    if( sx < width && sy < height )
                    {
                        // copy the rgba value
                        u8 const* sourcePixel = rgba + pitch*sy + 4*sx;
                        CopyRGBA( sourcePixel, targetPixel, flags );

                        // enable this pixel
                        mask |= ( 1 << ( 4*py + px ) );
                    }

                    // advance to the next pixel
                    targetPixel += 4;
                }
            }

            // compress it into the output
            CompressMasked( sourceRgba, mask, targetBlock, flags, metric );

            // advance
            targetBlock += bytesPerBlock;
        }
    }
}

void CompressImage( u8 const* rgba, int width, int height, void* blocks, int flags, float* metric )
{
    CompressImage( rgba, width, height, width*4, blocks, flags, metric );
}
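
/* Usage sketch (illustrative, assumes <vector>): compress a tightly packed RGBA
   image, sizing the output buffer with GetStorageRequirements as declared above.

       int bytes = squish::GetStorageRequirements( width, height, squish::kDxt1 );
       std::vector< squish::u8 > dxt( bytes );
       squish::CompressImage( rgba, width, height, &dxt[0], squish::kDxt1, NULL );
*/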

void DecompressImage( u8* rgba, int width, int height, int pitch, void const* blocks, int flags )
{
    // fix any bad flags
    flags = FixFlags( flags );

    // loop over blocks
#ifdef SQUISH_USE_OPENMP
#   pragma omp parallel for
#endif
    for( int y = 0; y < height; y += 4 )
    {
        // initialise the block input
        u8 const* sourceBlock = reinterpret_cast< u8 const* >( blocks );
        int bytesPerBlock = ( ( flags & ( kDxt1 | kBc4 ) ) != 0 ) ? 8 : 16;
        sourceBlock += ( ( y/4 )*( ( width + 3 )/4 ) )*bytesPerBlock;

        for( int x = 0; x < width; x += 4 )
        {
            // decompress the block
            u8 targetRgba[4*16];
            Decompress( targetRgba, sourceBlock, flags );

            // write the decompressed pixels to the correct image locations
            u8 const* sourcePixel = targetRgba;
            for( int py = 0; py < 4; ++py )
            {
                for( int px = 0; px < 4; ++px )
                {
                    // get the target location
                    int sx = x + px;
                    int sy = y + py;

                    // write if we're in the image
                    if( sx < width && sy < height )
                    {
                        // copy the rgba value
                        u8* targetPixel = rgba + pitch*sy + 4*sx;
                        CopyRGBA( sourcePixel, targetPixel, flags );
                    }

                    // advance to the next pixel
                    sourcePixel += 4;
                }
            }

            // advance
            sourceBlock += bytesPerBlock;
        }
    }
}

void DecompressImage( u8* rgba, int width, int height, void const* blocks, int flags )
{
    DecompressImage( rgba, width, height, width*4, blocks, flags );
}
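
/* Usage sketch (illustrative, assumes <vector>): expand compressed blocks back
   into a tightly packed RGBA buffer of width*height*4 bytes.

       std::vector< squish::u8 > rgbaOut( width*height*4 );
       squish::DecompressImage( &rgbaOut[0], width, height, &dxt[0], squish::kDxt1 );
*/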

static double ErrorSq( double x, double y )
{
    return ( x - y )*( x - y );
}

static void ComputeBlockWMSE( u8 const *original, u8 const *compressed, unsigned int w, unsigned int h, double &cmse, double &amse )
{
    // Computes the MSE for the block and weights it by the variance of the original block.
    // If the variance of the original block is less than 4 (i.e. a standard deviation of 1 per channel)
    // then the block is close to being a single colour. Quantisation errors in single colour blocks
    // are easier to see than similar errors in blocks that contain more colours, particularly when there
    // are many such blocks in a large area (e.g. a blue sky background), as they cause banding. Given that
    // banding is easier to see than small errors in "complex" blocks, we weight the errors by a factor
    // of 5. This implies that images with large, single colour areas will have a higher potential WMSE
    // than images with lots of detail.
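    //
    // The variance test below uses the identity N*sum(p^2) - (sum p)^2 = N^2 * var(p)
    // per channel, with N = w*h pixels, so comparing the summed per-channel values
    // against 4*(w*h)^2 is equivalent to asking whether the per-channel variances
    // sum to less than 4.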
    cmse = amse = 0;
    unsigned int sum_p[4];  // per channel sum of pixels
    unsigned int sum_p2[4]; // per channel sum of pixels squared
    memset( sum_p, 0, sizeof( sum_p ) );
    memset( sum_p2, 0, sizeof( sum_p2 ) );
    for( unsigned int py = 0; py < 4; ++py )
    {
        for( unsigned int px = 0; px < 4; ++px )
        {
            if( px < w && py < h )
            {
                double pixelCMSE = 0;
                for( int i = 0; i < 3; ++i )
                {
                    pixelCMSE += ErrorSq( original[i], compressed[i] );
                    sum_p[i] += original[i];
                    sum_p2[i] += (unsigned int)original[i]*original[i];
                }
                if( original[3] == 0 && compressed[3] == 0 )
                    pixelCMSE = 0; // transparent in both, so colour is inconsequential
                amse += ErrorSq( original[3], compressed[3] );
                cmse += pixelCMSE;
                sum_p[3] += original[3];
                sum_p2[3] += (unsigned int)original[3]*original[3];
            }
            original += 4;
            compressed += 4;
        }
    }
    unsigned int variance = 0;
    for( int i = 0; i < 4; ++i )
        variance += w*h*sum_p2[i] - sum_p[i]*sum_p[i];
    if( variance < 4*w*w*h*h )
    {
        amse *= 5;
        cmse *= 5;
    }
}

void ComputeMSE( u8 const *rgba, int width, int height, int pitch, u8 const *dxt, int flags, double &colourMSE, double &alphaMSE )
{
    // fix any bad flags
    flags = FixFlags( flags );
    colourMSE = alphaMSE = 0;

    // initialise the block input
    squish::u8 const* sourceBlock = dxt;
    int bytesPerBlock = ( ( flags & squish::kDxt1 ) != 0 ) ? 8 : 16;

    // loop over blocks
    for( int y = 0; y < height; y += 4 )
    {
        for( int x = 0; x < width; x += 4 )
        {
            // decompress the block
            u8 targetRgba[4*16];
            Decompress( targetRgba, sourceBlock, flags );
            u8 const* sourcePixel = targetRgba;

            // copy across to a similar pixel block
            u8 originalRgba[4*16];
            u8* originalPixel = originalRgba;

            for( int py = 0; py < 4; ++py )
            {
                for( int px = 0; px < 4; ++px )
                {
                    int sx = x + px;
                    int sy = y + py;
                    if( sx < width && sy < height )
                    {
                        u8 const* targetPixel = rgba + pitch*sy + 4*sx;
                        CopyRGBA( targetPixel, originalPixel, flags );
                    }
                    sourcePixel += 4;
                    originalPixel += 4;
                }
            }

            // compute the weighted MSE of the block
            double blockCMSE, blockAMSE;
            ComputeBlockWMSE( originalRgba, targetRgba, std::min( 4, width - x ), std::min( 4, height - y ), blockCMSE, blockAMSE );
            colourMSE += blockCMSE;
            alphaMSE += blockAMSE;

            // advance
            sourceBlock += bytesPerBlock;
        }
    }
    colourMSE /= ( width*height*3 );
    alphaMSE /= ( width*height );
}

void ComputeMSE( u8 const *rgba, int width, int height, u8 const *dxt, int flags, double &colourMSE, double &alphaMSE )
{
    ComputeMSE( rgba, width, height, width*4, dxt, flags, colourMSE, alphaMSE );
}
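
/* Usage sketch (illustrative): measure how much quality was lost, e.g. after the
   CompressImage call sketched earlier.

       double colourMSE, alphaMSE;
       squish::ComputeMSE( rgba, width, height, &dxt[0], squish::kDxt1, colourMSE, alphaMSE );
*/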

} // namespace squish