sha1-ni.c

/*
 * Hardware-accelerated implementation of SHA-1 using x86 SHA-NI.
 */

#include "ssh.h"
#include "sha1.h"

#include <wmmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>
#if HAVE_SHAINTRIN_H
#include <shaintrin.h>
#endif

#if defined(__clang__) || defined(__GNUC__)

#include <cpuid.h>
#define GET_CPU_ID_0(out)                               \
    __cpuid(0, (out)[0], (out)[1], (out)[2], (out)[3])
#define GET_CPU_ID_7(out)                                       \
    __cpuid_count(7, 0, (out)[0], (out)[1], (out)[2], (out)[3])

#else

#define GET_CPU_ID_0(out) __cpuid(out, 0)
#define GET_CPU_ID_7(out) __cpuidex(out, 7, 0)

#endif
static bool sha1_ni_available(void)
{
    unsigned int CPUInfo[4];

    GET_CPU_ID_0(CPUInfo);
    if (CPUInfo[0] < 7)
        return false;

    GET_CPU_ID_7(CPUInfo);
    return CPUInfo[1] & (1 << 29); /* SHA extensions: leaf 7, EBX bit 29 */
}
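
/*
 * Purely illustrative: an equivalent feature test written with the
 * GCC/Clang helper __get_cpuid_count() from <cpuid.h>. This is an
 * assumption about that toolchain only; the file itself sticks to the
 * macros above so that MSVC also works.
 *
 *     unsigned a, b, c, d;
 *     if (!__get_cpuid_count(7, 0, &a, &b, &c, &d))
 *         return false;          // CPU has no leaf 7 at all
 *     return (b >> 29) & 1;      // EBX bit 29 = SHA extensions
 */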
/*
 * SHA-1 implementation using the new instructions. The code is based
 * on Jeffrey Walton's SHA-1 implementation:
 * https://github.com/noloader/SHA-Intrinsics
 *
 * Each _mm_sha1rnds4_epu32 call below performs four rounds; its
 * immediate operand (0..3) selects the round function and constant
 * used for rounds 0-19, 20-39, 40-59 and 60-79 respectively.
 */
static inline void sha1_ni_block(__m128i *core, const uint8_t *p)
{
    __m128i ABCD, E0, E1, MSG0, MSG1, MSG2, MSG3;

    /* Byte-swap mask: the message is big-endian, the CPU little-endian */
    const __m128i MASK = _mm_set_epi64x(
        0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL);

    const __m128i *block = (const __m128i *)p;
    /* Load initial values */
    ABCD = core[0];
    E0 = core[1];

    /* Rounds 0-3 */
    MSG0 = _mm_loadu_si128(block);
    MSG0 = _mm_shuffle_epi8(MSG0, MASK);
    E0 = _mm_add_epi32(E0, MSG0);
    E1 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);

    /* Rounds 4-7 */
    MSG1 = _mm_loadu_si128(block + 1);
    MSG1 = _mm_shuffle_epi8(MSG1, MASK);
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);

    /* Rounds 8-11 */
    MSG2 = _mm_loadu_si128(block + 2);
    MSG2 = _mm_shuffle_epi8(MSG2, MASK);
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 12-15 */
    MSG3 = _mm_loadu_si128(block + 3);
    MSG3 = _mm_shuffle_epi8(MSG3, MASK);
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);

    /* Rounds 16-19 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 20-23 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
    MSG3 = _mm_xor_si128(MSG3, MSG1);

    /* Rounds 24-27 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 28-31 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);
    /* Rounds 32-35 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 36-39 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
    MSG3 = _mm_xor_si128(MSG3, MSG1);

    /* Rounds 40-43 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 44-47 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);

    /* Rounds 48-51 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 52-55 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
    MSG3 = _mm_xor_si128(MSG3, MSG1);
    /* Rounds 56-59 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 60-63 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);

    /* Rounds 64-67 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 68-71 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
    MSG3 = _mm_xor_si128(MSG3, MSG1);

    /* Rounds 72-75 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);

    /* Rounds 76-79 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);

    /* Combine state */
    core[0] = _mm_add_epi32(ABCD, core[0]);
    core[1] = _mm_sha1nexte_epu32(E0, core[1]);
}
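
/*
 * For reference, a scalar sketch of the message schedule that the
 * SHA1MSG1 / SHA1MSG2 / XOR sequence above computes four words at a
 * time. 'rol32' here is a hypothetical left-rotate helper, not
 * something defined in this file:
 *
 *     for (unsigned t = 16; t < 80; t++)
 *         w[t] = rol32(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1);
 */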
typedef struct sha1_ni {
    /*
     * core[0] stores the first four words of the SHA-1 state. core[1]
     * stores just the fifth word, in the vector lane at the highest
     * address.
     */
    __m128i core[2];
    sha1_block blk;
    void *pointer_to_free;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha1_ni;
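
/*
 * Illustration of that layout (derived from sha1_ni_reset() below),
 * listing the 32-bit lanes from lowest to highest address just after
 * a reset:
 *
 *     core[0] = { h3, h2, h1, h0 }
 *             = { 0x10325476, 0x98badcfe, 0xefcdab89, 0x67452301 }
 *     core[1] = {  0,  0,  0, h4 }
 *             = { 0x00000000, 0x00000000, 0x00000000, 0xc3d2e1f0 }
 */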
static void sha1_ni_write(BinarySink *bs, const void *vp, size_t len);

static sha1_ni *sha1_ni_alloc(void)
{
    /*
     * The __m128i variables in the context structure need to be
     * 16-byte aligned, but not all malloc implementations that this
     * code has to work with will guarantee to return a 16-byte
     * aligned pointer. So we over-allocate, manually realign the
     * pointer ourselves, and store the original one inside the
     * context so we know how to free it later.
     */
    void *allocation = smalloc(sizeof(sha1_ni) + 15);
    uintptr_t alloc_address = (uintptr_t)allocation;
    uintptr_t aligned_address = (alloc_address + 15) & ~15;
    sha1_ni *s = (sha1_ni *)aligned_address;
    s->pointer_to_free = allocation;
    return s;
}
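
/*
 * Worked example of that realignment, using a made-up address: if
 * smalloc() were to return 0x1009, then (0x1009 + 15) & ~15 == 0x1010,
 * which is 16-byte aligned and at most 15 bytes past the start of the
 * over-allocated region, so sizeof(sha1_ni) bytes still fit inside it.
 */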
static ssh_hash *sha1_ni_new(const ssh_hashalg *alg)
{
    const struct sha1_extra *extra = (const struct sha1_extra *)alg->extra;
    if (!check_availability(extra))
        return NULL;

    sha1_ni *s = sha1_ni_alloc();

    s->hash.vt = alg;
    BinarySink_INIT(s, sha1_ni_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}
static void sha1_ni_reset(ssh_hash *hash)
{
    sha1_ni *s = container_of(hash, sha1_ni, hash);

    /* Initialise the core vectors in their storage order */
    s->core[0] = _mm_set_epi64x(
        0x67452301efcdab89ULL, 0x98badcfe10325476ULL);
    s->core[1] = _mm_set_epi32(0xc3d2e1f0, 0, 0, 0);

    sha1_block_setup(&s->blk);
}
static void sha1_ni_copyfrom(ssh_hash *hcopy, ssh_hash *horig)
{
    sha1_ni *copy = container_of(hcopy, sha1_ni, hash);
    sha1_ni *orig = container_of(horig, sha1_ni, hash);

    void *ptf_save = copy->pointer_to_free;
    *copy = *orig; /* structure copy */
    copy->pointer_to_free = ptf_save;

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);
}
static void sha1_ni_free(ssh_hash *hash)
{
    sha1_ni *s = container_of(hash, sha1_ni, hash);

    void *ptf = s->pointer_to_free;
    smemclr(s, sizeof(*s));
    sfree(ptf);
}
static void sha1_ni_write(BinarySink *bs, const void *vp, size_t len)
{
    sha1_ni *s = BinarySink_DOWNCAST(bs, sha1_ni);

    while (len > 0)
        if (sha1_block_write(&s->blk, &vp, &len))
            sha1_ni_block(s->core, s->blk.block);
}
static void sha1_ni_digest(ssh_hash *hash, uint8_t *digest)
{
    sha1_ni *s = container_of(hash, sha1_ni, hash);
    sha1_block_pad(&s->blk, BinarySink_UPCAST(s));

    /* Rearrange the first vector into its output order: the shuffle
     * control 0x1B reverses the lane order, so h0 comes first */
    __m128i abcd = _mm_shuffle_epi32(s->core[0], 0x1B);

    /* Byte-swap it into the output endianness */
    const __m128i mask = _mm_setr_epi8(3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12);
    abcd = _mm_shuffle_epi8(abcd, mask);

    /* And store it */
    _mm_storeu_si128((__m128i *)digest, abcd);

    /* Finally, store the leftover word */
    uint32_t e = _mm_extract_epi32(s->core[1], 3);
    PUT_32BIT_MSB_FIRST(digest + 16, e);
}
SHA1_VTABLE(ni, "SHA-NI accelerated");
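
/*
 * Hypothetical known-answer check, as a sketch only: it assumes a
 * hash_simple() convenience wrapper and that the macro above defines a
 * vtable named ssh_sha1_ni, neither of which is guaranteed by this file.
 *
 *     unsigned char d[20];
 *     hash_simple(&ssh_sha1_ni, make_ptrlen("abc", 3), d);
 *     // expected: a9993e364706816aba3e25717850c26c9cd0d89d
 */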