// Apologies in advance for combining the preprocessor with inline assembly,
// two notoriously gnarly parts of C, but it was necessary to avoid a lot of
// code repetition. The preprocessor is used to template large sections of
// inline assembly that differ only in the registers used. If the code was
// written out by hand, it would become very large and hard to audit.

// Generate a block of inline assembly that loads register R0 from memory. The
// offset at which the register is loaded is set by the given round and a
// constant offset.
#define LOAD(R0, ROUND, OFFSET) \
    "vlddqu ("#ROUND" * 24 + "#OFFSET")(%[src]), %["R0"] \n\t"

// Generate a block of inline assembly that deinterleaves and shuffles the
// contents of register R0 using preloaded constants. The result is left in
// R1; R2 is clobbered as a temporary.
#define SHUF(R0, R1, R2) \
    "vpshufb  %[lut0], %["R0"], %["R1"] \n\t" \
    "vpand    %["R1"], %[msk0], %["R2"] \n\t" \
    "vpand    %["R1"], %[msk2], %["R1"] \n\t" \
    "vpmulhuw %["R2"], %[msk1], %["R2"] \n\t" \
    "vpmullw  %["R1"], %[msk3], %["R1"] \n\t" \
    "vpor     %["R1"], %["R2"], %["R1"] \n\t"

// Generate a block of inline assembly that translates the 6-bit indices in
// register R1 to the base64 alphabet, using preloaded constants. The result
// is left in R0; R2 is clobbered as a temporary.
#define TRAN(R0, R1, R2) \
    "vpsubusb %[n51], %["R1"], %["R0"] \n\t" \
    "vpcmpgtb %[n25], %["R1"], %["R2"] \n\t" \
    "vpsubb   %["R2"], %["R0"], %["R0"] \n\t" \
    "vpshufb  %["R0"], %[lut1], %["R2"] \n\t" \
    "vpaddb   %["R1"], %["R2"], %["R0"] \n\t"

// Generate a block of inline assembly that stores the given register R0 at an
// offset set by the given round.
#define STOR(R0, ROUND) \
    "vmovdqu %["R0"], ("#ROUND" * 32)(%[dst]) \n\t"

// Generate a block of inline assembly that generates a single self-contained
// encoder round: fetch the data, process it, and store the result. Then update
// the source and destination pointers.
#define ROUND() \
    LOAD("a", 0, -4) \
    SHUF("a", "b", "c") \
    TRAN("a", "b", "c") \
    STOR("a", 0) \
    "add $24, %[src] \n\t" \
    "add $32, %[dst] \n\t"

// Define a macro that initiates a three-way interleaved encoding round by
// preloading registers a, b and c from memory.
// The register graph shows which registers are in use during each step, and
// is a visual aid for choosing registers for that step. Symbol index:
//
//  +  indicates that a register is loaded by that step.
//  |  indicates that a register is in use and must not be touched.
//  -  indicates that a register is decommissioned by that step.
//  x  indicates that a register is used as a temporary by that step.
//  V  indicates that a register is an input or output to the macro.
//
#define ROUND_3_INIT()              /*  a  b  c  d  e  f  */ \
    LOAD("a", 0, -4)                /*  +                 */ \
    SHUF("a", "d", "e")             /*  |        +  x     */ \
    LOAD("b", 1, -4)                /*  |  +     |        */ \
    TRAN("a", "d", "e")             /*  |  |     -  x     */ \
    LOAD("c", 2, -4)                /*  V  V  V           */

// Define a macro that translates, shuffles and stores the input registers A, B
// and C, and preloads registers D, E and F for the next round.
// This macro can be arbitrarily daisy-chained by feeding output registers D, E
// and F back into the next round as input registers A, B and C. The macro
// carefully interleaves memory operations with data operations for optimal
// pipelined performance.
#define ROUND_3(ROUND, A,B,C,D,E,F) /*  A  B  C  D  E  F  */ \
    LOAD(D, (ROUND + 3), -4)        /*  V  V  V  +        */ \
    SHUF(B, E, F)                   /*  |  |  |  |  +  x  */ \
    STOR(A, (ROUND + 0))            /*  -  |  |  |  |     */ \
    TRAN(B, E, F)                   /*     |  |  |  -  x  */ \
    LOAD(E, (ROUND + 4), -4)        /*     |  |  |  +     */ \
    SHUF(C, A, F)                   /*  +  |  |  |  |  x  */ \
    STOR(B, (ROUND + 1))            /*  |  -  |  |  |     */ \
    TRAN(C, A, F)                   /*  -     |  |  |  x  */ \
    LOAD(F, (ROUND + 5), -4)        /*        |  |  |  +  */ \
    SHUF(D, A, B)                   /*  +  x  |  |  |  |  */ \
    STOR(C, (ROUND + 2))            /*  |     -  |  |  |  */ \
    TRAN(D, A, B)                   /*  -  x     V  V  V  */

// Define a macro that terminates a ROUND_3 macro by taking pre-loaded
// registers D, E and F, and translating, shuffling and storing them.
#define ROUND_3_END(ROUND, A,B,C,D,E,F) /*  A  B  C  D  E  F  */ \
    SHUF(E, A, B)                   /*  +  x     V  V  V  */ \
    STOR(D, (ROUND + 3))            /*  |        -  |  |  */ \
    TRAN(E, A, B)                   /*  -  x        |  |  */ \
    SHUF(F, C, D)                   /*        +  x  |  |  */ \
    STOR(E, (ROUND + 4))            /*        |     -  |  */ \
    TRAN(F, C, D)                   /*        -  x     |  */ \
    STOR(F, (ROUND + 5))            /*                 -  */

// Define a type A round. Inputs are a, b, and c, outputs are d, e, and f.
#define ROUND_3_A(ROUND) \
    ROUND_3(ROUND, "a", "b", "c", "d", "e", "f")

// Define a type B round. Inputs and outputs are swapped with regard to type A.
#define ROUND_3_B(ROUND) \
    ROUND_3(ROUND, "d", "e", "f", "a", "b", "c")

// Terminating macro for a type A round.
#define ROUND_3_A_LAST(ROUND) \
    ROUND_3_A(ROUND) \
    ROUND_3_END(ROUND, "a", "b", "c", "d", "e", "f")

// Terminating macro for a type B round.
#define ROUND_3_B_LAST(ROUND) \
    ROUND_3_B(ROUND) \
    ROUND_3_END(ROUND, "d", "e", "f", "a", "b", "c")
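
// The unrolled loops below compose these macros by alternating the two round
// types, ping-ponging data between the two register sets. The 9x block, for
// instance, expands as:
//
//   ROUND_3_INIT()      // load rounds 0..2, translate round 0
//   ROUND_3_A(0)        // store rounds 0..2, load rounds 3..5
//   ROUND_3_B_LAST(3)   // store rounds 3..5, then drain rounds 6..8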

// Suppress clang's warning that the literal string in the asm statement is
// overlong (longer than the ISO-mandated minimum size of 4095 bytes for C99
// compilers). It may be true, but the goal here is not C99 portability.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Woverlength-strings"

static inline void
enc_loop_avx2 (const uint8_t **s, size_t *slen, uint8_t **o, size_t *olen)
{
    // For a clearer explanation of the algorithm used by this function,
    // please refer to the plain (not inline assembly) implementation. This
    // function follows the same basic logic.

    if (*slen < 32) {
        return;
    }

    // Process blocks of 24 bytes at a time. Because blocks are loaded 32
    // bytes at a time at an offset of -4, ensure that there will be at
    // least 4 remaining bytes after the last round, so that the final read
    // will not pass beyond the bounds of the input buffer.
    size_t rounds = (*slen - 4) / 24;

    *slen -= rounds * 24;   // 24 bytes consumed per round
    *olen += rounds * 32;   // 32 bytes produced per round
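
    // For example, with *slen = 100: rounds = (100 - 4) / 24 = 4, so the
    // rounds consume 96 bytes and produce 128, and the final 32-byte read
    // (at offset 3 * 24 - 4 = 68) covers bytes 68..99, exactly within the
    // input buffer.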

    // Pre-decrement the number of rounds to get the number of rounds
    // *after* the first round, which is handled as a special case.
    rounds--;

    // Number of times to go through the 36x loop.
    size_t loops = rounds / 36;

    // Number of rounds remaining after the 36x loop.
    rounds %= 36;

    // Lookup tables.
    const __m256i lut0 = _mm256_set_epi8(
        10, 11,  9, 10,  7,  8,  6,  7,  4,  5,  3,  4,  1,  2,  0,  1,
        14, 15, 13, 14, 11, 12, 10, 11,  8,  9,  7,  8,  5,  6,  4,  5);

    const __m256i lut1 = _mm256_setr_epi8(
        65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0,
        65, 71, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -19, -16, 0, 0);

    // Temporary registers.
    __m256i a, b, c, d, e;

    // Temporary register f doubles as the shift mask for the first round.
    __m256i f = _mm256_setr_epi32(0, 0, 1, 2, 3, 4, 5, 6);

    __asm__ volatile (

        // The first loop iteration requires special handling to ensure
        // that the read, which is normally done at an offset of -4,
        // does not underflow the buffer. Load the buffer at an offset
        // of 0 and permute the input to achieve the same effect.
        LOAD("a", 0, 0)
        "vpermd %[a], %[f], %[a] \n\t"

        // Perform the standard shuffling and translation steps.
        SHUF("a", "b", "c")
        TRAN("a", "b", "c")

        // Store the result and increment the source and dest pointers.
        "vmovdqu %[a], (%[dst])      \n\t"
        "add $24, %[src]             \n\t"
        "add $32, %[dst]             \n\t"

        // If there are 36 rounds or more, enter a 36x unrolled loop of
        // interleaved encoding rounds. The rounds interleave memory
        // operations (load/store) with data operations (table lookups,
        // etc) to maximize pipeline throughput.
        "    test %[loops], %[loops] \n\t"
        "    jz   18f                \n\t"
        "    jmp  36f                \n\t"
        "                            \n\t"
        ".balign 64                  \n\t"
        "36: " ROUND_3_INIT()
        "    " ROUND_3_A( 0)
        "    " ROUND_3_B( 3)
        "    " ROUND_3_A( 6)
        "    " ROUND_3_B( 9)
        "    " ROUND_3_A(12)
        "    " ROUND_3_B(15)
        "    " ROUND_3_A(18)
        "    " ROUND_3_B(21)
        "    " ROUND_3_A(24)
        "    " ROUND_3_B(27)
        "    " ROUND_3_A_LAST(30)
        "    add $(24 * 36), %[src]  \n\t"
        "    add $(32 * 36), %[dst]  \n\t"
        "    dec %[loops]            \n\t"
        "    jnz 36b                 \n\t"

        // Enter an 18x unrolled loop for rounds of 18 or more.
        "18: cmp $18, %[rounds]      \n\t"
        "    jl  9f                  \n\t"
        "    " ROUND_3_INIT()
        "    " ROUND_3_A(0)
        "    " ROUND_3_B(3)
        "    " ROUND_3_A(6)
        "    " ROUND_3_B(9)
        "    " ROUND_3_A_LAST(12)
        "    sub $18, %[rounds]      \n\t"
        "    add $(24 * 18), %[src]  \n\t"
        "    add $(32 * 18), %[dst]  \n\t"

        // Enter a 9x unrolled loop for rounds of 9 or more.
        "9:  cmp $9, %[rounds]       \n\t"
        "    jl  6f                  \n\t"
        "    " ROUND_3_INIT()
        "    " ROUND_3_A(0)
        "    " ROUND_3_B_LAST(3)
        "    sub $9, %[rounds]       \n\t"
        "    add $(24 * 9), %[src]   \n\t"
        "    add $(32 * 9), %[dst]   \n\t"

        // Enter a 6x unrolled loop for rounds of 6 or more.
        "6:  cmp $6, %[rounds]       \n\t"
        "    jl  55f                 \n\t"
        "    " ROUND_3_INIT()
        "    " ROUND_3_A_LAST(0)
        "    sub $6, %[rounds]       \n\t"
        "    add $(24 * 6), %[src]   \n\t"
        "    add $(32 * 6), %[dst]   \n\t"

        // Dispatch the remaining rounds 0..5.
        "55: cmp $3, %[rounds]       \n\t"
        "    jg  45f                 \n\t"
        "    je  3f                  \n\t"
        "    cmp $1, %[rounds]       \n\t"
        "    jg  2f                  \n\t"
        "    je  1f                  \n\t"
        "    jmp 0f                  \n\t"

        "45: cmp $4, %[rounds]       \n\t"
        "    je  4f                  \n\t"

        // Block of non-interlaced encoding rounds, which can each
        // individually be jumped to. Rounds fall through to the next.
        "5: " ROUND()
        "4: " ROUND()
        "3: " ROUND()
        "2: " ROUND()
        "1: " ROUND()
        "0: \n\t"

        // Outputs (modified).
        : [rounds] "+r"  (rounds),
          [loops]  "+r"  (loops),
          [src]    "+r"  (*s),
          [dst]    "+r"  (*o),
          [a]      "=&x" (a),
          [b]      "=&x" (b),
          [c]      "=&x" (c),
          [d]      "=&x" (d),
          [e]      "=&x" (e),
          [f]      "+x"  (f)

        // Inputs (not modified).
        : [lut0] "x" (lut0),
          [lut1] "x" (lut1),
          [msk0] "x" (_mm256_set1_epi32(0x0FC0FC00)),
          [msk1] "x" (_mm256_set1_epi32(0x04000040)),
          [msk2] "x" (_mm256_set1_epi32(0x003F03F0)),
          [msk3] "x" (_mm256_set1_epi32(0x01000010)),
          [n51]  "x" (_mm256_set1_epi8(51)),
          [n25]  "x" (_mm256_set1_epi8(25))

        // Clobbers.
        : "cc", "memory"
    );
}

#pragma GCC diagnostic pop
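
// A minimal usage sketch (hypothetical driver, not part of this file): the
// fast path advances all four cursors in place, and a scalar tail loop is
// expected to finish the remaining bytes:
//
//   const uint8_t *src  = input;
//   uint8_t       *dst  = output;
//   size_t         slen = input_len;
//   size_t         olen = 0;
//
//   enc_loop_avx2(&src, &slen, &dst, &olen);
//   // slen now holds the unprocessed tail length; src and dst point past
//   // the consumed input and written output; olen grew by bytes written.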