/* entropy_common.c */
  1. /* ******************************************************************
  2. * Common functions of New Generation Entropy library
  3. * Copyright (c) Meta Platforms, Inc. and affiliates.
  4. *
  5. * You can contact the author at :
  6. * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
  7. * - Public forum : https://groups.google.com/forum/#!forum/lz4c
  8. *
  9. * This source code is licensed under both the BSD-style license (found in the
  10. * LICENSE file in the root directory of this source tree) and the GPLv2 (found
  11. * in the COPYING file in the root directory of this source tree).
  12. * You may select, at your option, one of the above-listed licenses.
  13. ****************************************************************** */
  14. /* *************************************
  15. * Dependencies
  16. ***************************************/
  17. #include "mem.h"
  18. #include "error_private.h" /* ERR_*, ERROR */
  19. #define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */
  20. #include "fse.h"
  21. #include "huf.h"
  22. #include "bits.h" /* ZSDT_highbit32, ZSTD_countTrailingZeros32 */
  23. /*=== Version ===*/
  24. unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
  25. /*=== Error Management ===*/
  26. unsigned FSE_isError(size_t code) { return ERR_isError(code); }
  27. const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); }
  28. unsigned HUF_isError(size_t code) { return ERR_isError(code); }
  29. const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); }
  30. /*-**************************************************************
  31. * FSE NCount encoding-decoding
  32. ****************************************************************/
/*! FSE_readNCount_body() :
 *  Decode an FSE "NCount" header : reads the table log and the normalized
 *  frequency of each symbol from `headerBuffer` into `normalizedCounter`.
 *  @normalizedCounter : output array, sized for at least *maxSVPtr+1 entries
 *  @maxSVPtr : in: max symbol value the caller supports; out: last symbol read
 *  @tableLogPtr : out: decoded table log
 *  @return : nb of bytes consumed from headerBuffer, or an error code
 *            (tableLog_tooLarge, corruption_detected, maxSymbolValue_tooSmall).
 *  Note : templated via FORCE_INLINE_TEMPLATE so the bmi2/default wrappers
 *  below each get their own specialized copy. */
FORCE_INLINE_TEMPLATE
size_t FSE_readNCount_body(short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
                           const void* headerBuffer, size_t hbSize)
{
    const BYTE* const istart = (const BYTE*) headerBuffer;
    const BYTE* const iend = istart + hbSize;
    const BYTE* ip = istart;
    int nbBits;
    int remaining;
    int threshold;
    U32 bitStream;
    int bitCount;
    unsigned charnum = 0;
    unsigned const maxSV1 = *maxSVPtr + 1;
    int previous0 = 0;

    if (hbSize < 8) {
        /* This function only works when hbSize >= 8 :
         * copy the short input into a zero-padded 8-byte buffer and recurse,
         * then reject results that claim to have consumed padding bytes. */
        char buffer[8] = {0};
        ZSTD_memcpy(buffer, headerBuffer, hbSize);
        {   size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr,
                                                    buffer, sizeof(buffer));
            if (FSE_isError(countSize)) return countSize;
            if (countSize > hbSize) return ERROR(corruption_detected);
            return countSize;
    }   }
    assert(hbSize >= 8);

    /* init */
    ZSTD_memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0]));   /* all symbols not present in NCount have a frequency of 0 */
    bitStream = MEM_readLE32(ip);
    nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
    bitStream >>= 4;
    bitCount = 4;
    *tableLogPtr = nbBits;
    /* `remaining` tracks how much probability mass is still unassigned
     * (+1 so that reaching exactly 1 marks a complete distribution). */
    remaining = (1<<nbBits)+1;
    threshold = 1<<nbBits;
    nbBits++;

    for (;;) {
        if (previous0) {
            /* Count the number of repeats. Each time the
             * 2-bit repeat code is 0b11 there is another
             * repeat.
             * Avoid UB by setting the high bit to 1.
             */
            int repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            while (repeats >= 12) {
                charnum += 3 * 12;
                if (LIKELY(ip <= iend-7)) {
                    ip += 3;
                } else {
                    /* Near the end of input : clamp ip to the last readable
                     * 4-byte window and fold the skipped bytes into bitCount. */
                    bitCount -= (int)(8 * (iend - 7 - ip));
                    bitCount &= 31;
                    ip = iend - 4;
                }
                bitStream = MEM_readLE32(ip) >> bitCount;
                repeats = ZSTD_countTrailingZeros32(~bitStream | 0x80000000) >> 1;
            }
            charnum += 3 * repeats;
            bitStream >>= 2 * repeats;
            bitCount += 2 * repeats;

            /* Add the final repeat which isn't 0b11. */
            assert((bitStream & 3) < 3);
            charnum += bitStream & 3;
            bitCount += 2;

            /* This is an error, but break and return an error
             * at the end, because returning out of a loop makes
             * it harder for the compiler to optimize.
             */
            if (charnum >= maxSV1) break;

            /* We don't need to set the normalized count to 0
             * because we already memset the whole buffer to 0.
             */
            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                assert((bitCount >> 3) <= 3); /* For first condition to work */
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
        }
        {
            /* Decode one count : values below `max` use nbBits-1 bits,
             * the rest use the full nbBits (classic FSE small-alphabet trick). */
            int const max = (2*threshold-1) - remaining;
            int count;

            if ((bitStream & (threshold-1)) < (U32)max) {
                count = bitStream & (threshold-1);
                bitCount += nbBits-1;
            } else {
                count = bitStream & (2*threshold-1);
                if (count >= threshold) count -= max;
                bitCount += nbBits;
            }

            count--;   /* extra accuracy : stored value is count+1, -1 encodes "less than 1" probability */

            /* When it matters (small blocks), this is a
             * predictable branch, because we don't use -1.
             */
            if (count >= 0) {
                remaining -= count;
            } else {
                assert(count == -1);
                remaining += count;
            }
            normalizedCounter[charnum++] = (short)count;
            previous0 = !count;   /* a zero count triggers repeat-code parsing on the next iteration */

            assert(threshold > 1);
            if (remaining < threshold) {
                /* This branch can be folded into the
                 * threshold update condition because we
                 * know that threshold > 1.
                 */
                if (remaining <= 1) break;
                nbBits = ZSTD_highbit32(remaining) + 1;
                threshold = 1 << (nbBits - 1);
            }
            if (charnum >= maxSV1) break;

            if (LIKELY(ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
                ip += bitCount>>3;
                bitCount &= 7;
            } else {
                bitCount -= (int)(8 * (iend - 4 - ip));
                bitCount &= 31;
                ip = iend - 4;
            }
            bitStream = MEM_readLE32(ip) >> bitCount;
    }   }
    if (remaining != 1) return ERROR(corruption_detected);
    /* Only possible when there are too many zeros. */
    if (charnum > maxSV1) return ERROR(maxSymbolValue_tooSmall);
    if (bitCount > 32) return ERROR(corruption_detected);
    *maxSVPtr = charnum-1;

    ip += (bitCount+7)>>3;   /* round the consumed bit count up to whole bytes */
    return ip-istart;
}
  168. /* Avoids the FORCE_INLINE of the _body() function. */
  169. static size_t FSE_readNCount_body_default(
  170. short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
  171. const void* headerBuffer, size_t hbSize)
  172. {
  173. return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
  174. }
  175. #if DYNAMIC_BMI2
  176. BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2(
  177. short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
  178. const void* headerBuffer, size_t hbSize)
  179. {
  180. return FSE_readNCount_body(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
  181. }
  182. #endif
  183. size_t FSE_readNCount_bmi2(
  184. short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
  185. const void* headerBuffer, size_t hbSize, int bmi2)
  186. {
  187. #if DYNAMIC_BMI2
  188. if (bmi2) {
  189. return FSE_readNCount_body_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
  190. }
  191. #endif
  192. (void)bmi2;
  193. return FSE_readNCount_body_default(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize);
  194. }
  195. size_t FSE_readNCount(
  196. short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
  197. const void* headerBuffer, size_t hbSize)
  198. {
  199. return FSE_readNCount_bmi2(normalizedCounter, maxSVPtr, tableLogPtr, headerBuffer, hbSize, /* bmi2 */ 0);
  200. }
  201. /*! HUF_readStats() :
  202. Read compact Huffman tree, saved by HUF_writeCTable().
  203. `huffWeight` is destination buffer.
  204. `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
  205. @return : size read from `src` , or an error Code .
  206. Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
  207. */
  208. size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
  209. U32* nbSymbolsPtr, U32* tableLogPtr,
  210. const void* src, size_t srcSize)
  211. {
  212. U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];
  213. return HUF_readStats_wksp(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, wksp, sizeof(wksp), /* flags */ 0);
  214. }
/*! HUF_readStats_body() :
 *  Decode the Huffman weight table produced by HUF_writeCTable().
 *  Weights are either stored raw (4 bits each, "special header") or
 *  FSE-compressed; the last symbol's weight is implied and reconstructed
 *  so that the weight total reaches an exact power of 2.
 *  @huffWeight : output, one weight per symbol (hwSize entries available)
 *  @rankStats : output histogram of weights, at least HUF_TABLELOG_MAX+1 U32
 *  @nbSymbolsPtr / @tableLogPtr : outputs
 *  @workSpace/@wkspSize : scratch for FSE decompression
 *  @bmi2 : forwarded to the FSE decoder
 *  @return : nb of bytes consumed from src, or an error code. */
FORCE_INLINE_TEMPLATE size_t
HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats,
                   U32* nbSymbolsPtr, U32* tableLogPtr,
                   const void* src, size_t srcSize,
                   void* workSpace, size_t wkspSize,
                   int bmi2)
{
    U32 weightTotal;
    const BYTE* ip = (const BYTE*) src;
    size_t iSize;   /* nb of compressed bytes following the 1-byte header */
    size_t oSize;   /* nb of decoded weights (excluding the implied last one) */

    if (!srcSize) return ERROR(srcSize_wrong);
    iSize = ip[0];
    /* ZSTD_memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */

    if (iSize >= 128) { /* special header : weights stored raw, 2 per byte */
        oSize = iSize - 127;
        iSize = ((oSize+1)/2);
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        if (oSize >= hwSize) return ERROR(corruption_detected);
        ip += 1;
        {   U32 n;
            for (n=0; n<oSize; n+=2) {
                /* high nibble = even symbol's weight, low nibble = odd symbol's */
                huffWeight[n]   = ip[n/2] >> 4;
                huffWeight[n+1] = ip[n/2] & 15;
    }   }   }
    else  { /* header compressed with FSE (normal case) */
        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
        /* max (hwSize-1) values decoded, as last one is implied */
        oSize = FSE_decompress_wksp_bmi2(huffWeight, hwSize-1, ip+1, iSize, 6, workSpace, wkspSize, bmi2);
        if (FSE_isError(oSize)) return oSize;
    }

    /* collect weight stats */
    ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
    weightTotal = 0;
    {   U32 n; for (n=0; n<oSize; n++) {
            if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
            rankStats[huffWeight[n]]++;
            /* weight w contributes 2^(w-1); weight 0 contributes nothing */
            weightTotal += (1 << huffWeight[n]) >> 1;
    }   }
    if (weightTotal == 0) return ERROR(corruption_detected);

    /* get last non-null symbol weight (implied, total must be 2^n) */
    {   U32 const tableLog = ZSTD_highbit32(weightTotal) + 1;
        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
        *tableLogPtr = tableLog;
        /* determine last weight */
        {   U32 const total = 1 << tableLog;
            U32 const rest = total - weightTotal;
            U32 const verif = 1 << ZSTD_highbit32(rest);
            U32 const lastWeight = ZSTD_highbit32(rest) + 1;
            if (verif != rest) return ERROR(corruption_detected);    /* last value must be a clean power of 2 */
            huffWeight[oSize] = (BYTE)lastWeight;
            rankStats[lastWeight]++;
    }   }

    /* check tree construction validity */
    if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected);   /* by construction : at least 2 elts of rank 1, must be even */

    /* results */
    *nbSymbolsPtr = (U32)(oSize+1);
    return iSize+1;
}
  274. /* Avoids the FORCE_INLINE of the _body() function. */
  275. static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* rankStats,
  276. U32* nbSymbolsPtr, U32* tableLogPtr,
  277. const void* src, size_t srcSize,
  278. void* workSpace, size_t wkspSize)
  279. {
  280. return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 0);
  281. }
  282. #if DYNAMIC_BMI2
  283. static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats,
  284. U32* nbSymbolsPtr, U32* tableLogPtr,
  285. const void* src, size_t srcSize,
  286. void* workSpace, size_t wkspSize)
  287. {
  288. return HUF_readStats_body(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize, 1);
  289. }
  290. #endif
  291. size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, U32* rankStats,
  292. U32* nbSymbolsPtr, U32* tableLogPtr,
  293. const void* src, size_t srcSize,
  294. void* workSpace, size_t wkspSize,
  295. int flags)
  296. {
  297. #if DYNAMIC_BMI2
  298. if (flags & HUF_flags_bmi2) {
  299. return HUF_readStats_body_bmi2(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
  300. }
  301. #endif
  302. (void)flags;
  303. return HUF_readStats_body_default(huffWeight, hwSize, rankStats, nbSymbolsPtr, tableLogPtr, src, srcSize, workSpace, wkspSize);
  304. }