sha256-ni.c

/*
 * Hardware-accelerated implementation of SHA-256 using x86 SHA-NI.
 */

#include "ssh.h"
#include "sha256.h"

#include <wmmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>
#if HAVE_SHAINTRIN_H
#include <shaintrin.h>
#endif

#if defined(__clang__) || defined(__GNUC__)
#include <cpuid.h>
#define GET_CPU_ID_0(out) \
    __cpuid(0, (out)[0], (out)[1], (out)[2], (out)[3])
#define GET_CPU_ID_7(out) \
    __cpuid_count(7, 0, (out)[0], (out)[1], (out)[2], (out)[3])
#else
#define GET_CPU_ID_0(out) __cpuid(out, 0)
#define GET_CPU_ID_7(out) __cpuidex(out, 7, 0)
#endif
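
/*
 * Determine whether the SHA extensions are available: CPUID leaf 0
 * reports the highest supported standard leaf in EAX, and leaf 7
 * (sub-leaf 0) reports SHA support in bit 29 of EBX.
 */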
static bool sha256_ni_available(void)
{
    unsigned int CPUInfo[4];

    GET_CPU_ID_0(CPUInfo);
    if (CPUInfo[0] < 7)
        return false;

    GET_CPU_ID_7(CPUInfo);
    return CPUInfo[1] & (1 << 29); /* Check SHA */
}

/*
 * SHA-256 implementation using the new instructions. The code is
 * based on Jeffrey Walton's SHA-256 implementation:
 * https://github.com/noloader/SHA-Intrinsics
 */
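
/*
 * Each four-round group below follows the same pattern: add the next
 * four round constants K[t..t+3] to the corresponding message-schedule
 * words, then run SHA256RNDS2 twice. Each SHA256RNDS2 performs two
 * rounds and consumes the two w+K values in the low half of its third
 * operand, so the 0x0E shuffle moves the upper two values down for the
 * second call. In parallel, SHA256MSG1, a PALIGNR and SHA256MSG2
 * extend the message schedule by four words for use in a later group.
 */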
static inline void sha256_ni_block(__m128i *core, const uint8_t *p)
{
    __m128i STATE0, STATE1;
    __m128i MSG, TMP;
    __m128i MSG0, MSG1, MSG2, MSG3;
    const __m128i *block = (const __m128i *)p;
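
    /*
     * MASK is the _mm_shuffle_epi8 control that byte-swaps each 32-bit
     * word of the input block, converting the big-endian message words
     * into host order.
     */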
    const __m128i MASK = _mm_set_epi64x(
        0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);

    /* Load initial values */
    STATE0 = core[0];
    STATE1 = core[1];

    /* Rounds 0-3 */
    MSG = _mm_loadu_si128(block);
    MSG0 = _mm_shuffle_epi8(MSG, MASK);
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 4-7 */
    MSG1 = _mm_loadu_si128(block + 1);
    MSG1 = _mm_shuffle_epi8(MSG1, MASK);
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 8-11 */
    MSG2 = _mm_loadu_si128(block + 2);
    MSG2 = _mm_shuffle_epi8(MSG2, MASK);
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 12-15 */
    MSG3 = _mm_loadu_si128(block + 3);
    MSG3 = _mm_shuffle_epi8(MSG3, MASK);
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 16-19 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 20-23 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 24-27 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 28-31 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 32-35 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 36-39 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 40-43 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 44-47 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 48-51 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 52-55 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 56-59 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 60-63 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Combine state */
    core[0] = _mm_add_epi32(STATE0, core[0]);
    core[1] = _mm_add_epi32(STATE1, core[1]);
}

typedef struct sha256_ni {
    /*
     * These two vectors store the 8 words of the SHA-256 state, but
     * not in the same order they appear in the spec: the first vector
     * holds A,B,E,F and the second holds C,D,G,H.
     */
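    /*
     * Concretely (listing each vector's 32-bit elements from most
     * significant down to least significant):
     *     core[0] = { a, b, e, f }
     *     core[1] = { c, d, g, h }
     * which is the ABEF/CDGH operand split that the SHA256RNDS2
     * instruction works on.
     */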
    __m128i core[2];
    sha256_block blk;
    void *pointer_to_free;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_ni;

static void sha256_ni_write(BinarySink *bs, const void *vp, size_t len);

static sha256_ni *sha256_ni_alloc(void)
{
    /*
     * The __m128i variables in the context structure need to be
     * 16-byte aligned, but not all malloc implementations that this
     * code has to work with will guarantee to return a 16-byte
     * aligned pointer. So we over-allocate, manually realign the
     * pointer ourselves, and store the original one inside the
     * context so we know how to free it later.
     */
    void *allocation = smalloc(sizeof(sha256_ni) + 15);
    uintptr_t alloc_address = (uintptr_t)allocation;
    uintptr_t aligned_address = (alloc_address + 15) & ~15;
    sha256_ni *s = (sha256_ni *)aligned_address;
    s->pointer_to_free = allocation;
    return s;
}
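
/*
 * A worked example of the realignment above, with a made-up address:
 * if smalloc returned 0x100009, then 0x100009 + 15 = 0x100018, and
 * 0x100018 & ~15 = 0x100010, which is 16-byte aligned and at most 15
 * bytes past the original pointer, so the 15 bytes of over-allocation
 * always leave room for the whole structure.
 */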

static ssh_hash *sha256_ni_new(const ssh_hashalg *alg)
{
    const struct sha256_extra *extra =
        (const struct sha256_extra *)alg->extra;
    if (!check_availability(extra))
        return NULL;

    sha256_ni *s = sha256_ni_alloc();
    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_ni_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static void sha256_ni_reset(ssh_hash *hash)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);

    /* Initialise the core vectors in their storage order */
    s->core[0] = _mm_set_epi64x(
        0x6a09e667bb67ae85ULL, 0x510e527f9b05688cULL);
    s->core[1] = _mm_set_epi64x(
        0x3c6ef372a54ff53aULL, 0x1f83d9ab5be0cd19ULL);

    sha256_block_setup(&s->blk);
}

static void sha256_ni_copyfrom(ssh_hash *hcopy, ssh_hash *horig)
{
    sha256_ni *copy = container_of(hcopy, sha256_ni, hash);
    sha256_ni *orig = container_of(horig, sha256_ni, hash);

    /*
     * The structure copy below also overwrites pointer_to_free, so
     * save the copy's own allocation pointer and restore it after.
     */
    void *ptf_save = copy->pointer_to_free;
    *copy = *orig; /* structure copy */
    copy->pointer_to_free = ptf_save;

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);
}

static void sha256_ni_free(ssh_hash *hash)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);

    void *ptf = s->pointer_to_free;
    smemclr(s, sizeof(*s));
    sfree(ptf);
}

static void sha256_ni_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_ni *s = BinarySink_DOWNCAST(bs, sha256_ni);

    /* Buffer the incoming data and process each complete 64-byte
     * block as the buffer fills. */
    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_ni_block(s->core, s->blk.block);
}

static void sha256_ni_digest(ssh_hash *hash, uint8_t *digest)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);
    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));

    /* Rearrange the words into the output order */
    __m128i feba = _mm_shuffle_epi32(s->core[0], 0x1B);
    __m128i dchg = _mm_shuffle_epi32(s->core[1], 0xB1);
    __m128i dcba = _mm_blend_epi16(feba, dchg, 0xF0);
    __m128i hgfe = _mm_alignr_epi8(dchg, feba, 8);
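
    /*
     * At this point dcba holds the state words a,b,c,d in its four
     * 32-bit elements (lowest to highest) and hgfe holds e,f,g,h, so
     * after the per-word byte swap below, storing dcba then hgfe
     * emits the digest words in the standard order.
     */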

    /* Byte-swap them into the output endianness */
    const __m128i mask = _mm_setr_epi8(
        3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
    dcba = _mm_shuffle_epi8(dcba, mask);
    hgfe = _mm_shuffle_epi8(hgfe, mask);

    /* And store them */
    __m128i *output = (__m128i *)digest;
    _mm_storeu_si128(output, dcba);
    _mm_storeu_si128(output + 1, hgfe);
}

SHA256_VTABLE(ni, "SHA-NI accelerated");
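
/*
 * Rough usage sketch, for illustration only: it assumes that
 * SHA256_VTABLE(ni, ...) above defines a vtable named ssh_sha256_ni,
 * and that the generic ssh_hash_new / put_data / ssh_hash_final
 * helpers from ssh.h are used to drive it. The NULL check matters
 * because sha256_ni_new above returns NULL when SHA-NI is
 * unavailable.
 *
 *     unsigned char digest[32];
 *     ssh_hash *h = ssh_hash_new(&ssh_sha256_ni);
 *     if (h) {
 *         put_data(h, data, len);
 *         ssh_hash_final(h, digest);
 *     }
 */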