#ifndef __LZ4DEFS_H__
#define __LZ4DEFS_H__

/*
 * lz4defs.h -- common and architecture specific defines for the kernel usage
 *
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011-2016, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *	* Redistributions of source code must retain the above copyright
 *	  notice, this list of conditions and the following disclaimer.
 *	* Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following disclaimer
 *	  in the documentation and/or other materials provided with the
 *	  distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 *	- LZ4 homepage : http://www.lz4.org
 *	- LZ4 source repository : https://github.com/lz4/lz4
 *
 * Changed for kernel usage by:
 *	Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
 */
#include <asm/unaligned.h>
#include <linux/string.h>	/* memset, memcpy */

#define FORCE_INLINE __always_inline

/*-************************************
 *	Basic Types
 **************************************/
#include <linux/types.h>

typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef uintptr_t uptrval;
/*-************************************
 *	Architecture specifics
 **************************************/
#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

#if defined(__LITTLE_ENDIAN)
#define LZ4_LITTLE_ENDIAN 1
#else
#define LZ4_LITTLE_ENDIAN 0
#endif
/*-************************************
 *	Constants
 **************************************/
#define MINMATCH 4
#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
/*
 * ensure it's possible to write 2 x wildcopyLength
 * without overflowing output buffer
 */
#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
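/*
 * With the values above this works out to (2 * 8) - 4 = 12 bytes of
 * headroom that callers keep free at the end of the output buffer.
 */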
/* Increase this value ==> compression runs slower on incompressible data */
#define LZ4_SKIPTRIGGER 6

#define HASH_UNIT sizeof(size_t)

#define KB (1 << 10)
#define MB (1 << 20)
#define GB (1U << 30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
#define STEPSIZE sizeof(size_t)

#define ML_BITS 4
#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
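/*
 * Illustration (not used by the code): an LZ4 sequence token packs the
 * literal-run length into its high RUN_BITS and the match length into
 * its low ML_BITS. For example, token 0x74 decodes as
 *	literal run  = (0x74 >> ML_BITS) & RUN_MASK = 7
 *	match length = (0x74 & ML_MASK) + MINMATCH  = 4 + 4 = 8
 * with a field value of 15 (RUN_MASK/ML_MASK) signalling that extra
 * length bytes follow in the stream.
 */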
/*-************************************
 *	Reading and writing into memory
 **************************************/
static FORCE_INLINE U16 LZ4_read16(const void *ptr)
{
	return get_unaligned((const U16 *)ptr);
}

static FORCE_INLINE U32 LZ4_read32(const void *ptr)
{
	return get_unaligned((const U32 *)ptr);
}

static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
{
	return get_unaligned((const size_t *)ptr);
}

static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
{
	put_unaligned(value, (U16 *)memPtr);
}

static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
{
	put_unaligned(value, (U32 *)memPtr);
}

static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
{
	return get_unaligned_le16(memPtr);
}

static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
{
	put_unaligned_le16(value, memPtr);
}
/* Copy exactly 8 bytes between possibly unaligned locations. */
static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
{
#if LZ4_ARCH64
	U64 a = get_unaligned((const U64 *)src);

	put_unaligned(a, (U64 *)dst);
#else
	U32 a = get_unaligned((const U32 *)src);
	U32 b = get_unaligned((const U32 *)src + 1);

	put_unaligned(a, (U32 *)dst);
	put_unaligned(b, (U32 *)dst + 1);
#endif
}
/*
 * customized variant of memcpy,
 * which can overwrite up to 7 bytes beyond dstEnd
 */
static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
	const void *srcPtr, void *dstEnd)
{
	BYTE *d = (BYTE *)dstPtr;
	const BYTE *s = (const BYTE *)srcPtr;
	BYTE *const e = (BYTE *)dstEnd;

	do {
		LZ4_copy8(d, s);
		d += 8;
		s += 8;
	} while (d < e);
}
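/*
 * Minimal usage sketch (hypothetical caller, not part of this header):
 * because the loop above always copies whole 8-byte blocks, the caller
 * must keep at least WILDCOPYLENGTH - 1 = 7 spare bytes past dstEnd,
 * for example:
 *
 *	if (op + length <= oend - (WILDCOPYLENGTH - 1))
 *		LZ4_wildCopy(op, ip, op + length);	-- fast path
 *	else
 *		memcpy(op, ip, length);			-- safe tail copy
 *
 * where op, ip and oend stand for the caller's output, input and
 * output-end cursors.
 */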
/*
 * Given the XOR of two machine words, return how many of their bytes
 * are identical, counting from the lowest memory address.
 */
static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
{
#if LZ4_LITTLE_ENDIAN
	return __ffs(val) >> 3;
#else
	return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
#endif
}
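/*
 * Worked example (little endian, 64-bit, illustrative only): if the two
 * words agree in their first three bytes, the XOR might be
 * 0x0000000012000000; its lowest set bit is bit 25, so __ffs() returns
 * 25 and 25 >> 3 = 3 common bytes. The big-endian branch counts leading
 * zero bytes via __fls() instead.
 */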
/*
 * Return the number of bytes that are equal between pIn and pMatch,
 * comparing a word at a time and stopping before pInLimit.
 */
static FORCE_INLINE unsigned int LZ4_count(
	const BYTE *pIn,
	const BYTE *pMatch,
	const BYTE *pInLimit)
{
	const BYTE *const pStart = pIn;

	while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
		size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);

		if (!diff) {
			pIn += STEPSIZE;
			pMatch += STEPSIZE;
			continue;
		}

		pIn += LZ4_NbCommonBytes(diff);

		return (unsigned int)(pIn - pStart);
	}

#if LZ4_ARCH64
	if ((pIn < (pInLimit - 3))
		&& (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
		pIn += 4;
		pMatch += 4;
	}
#endif

	if ((pIn < (pInLimit - 1))
		&& (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
		pIn += 2;
		pMatch += 2;
	}

	if ((pIn < pInLimit) && (*pMatch == *pIn))
		pIn++;

	return (unsigned int)(pIn - pStart);
}
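/*
 * Illustrative call (hypothetical compressor loop, not part of this
 * header): once a 4-byte match at 'match' has been verified against
 * 'ip', the remaining match length is typically extended with
 *
 *	matchlen = MINMATCH + LZ4_count(ip + MINMATCH,
 *					match + MINMATCH,
 *					matchlimit);
 *
 * where matchlimit points just past the last input byte that may be
 * compared.
 */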
typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;

#define LZ4_STATIC_ASSERT(c)	BUILD_BUG_ON(!(c))

#endif