/*
 * Hardware-accelerated implementation of AES using x86 AES-NI.
 */

#include "ssh.h"
#include "aes.h"

#include <wmmintrin.h>
#include <smmintrin.h>

#if defined(__clang__) || defined(__GNUC__)
#include <cpuid.h>
#define GET_CPU_ID(out) __cpuid(1, (out)[0], (out)[1], (out)[2], (out)[3])
#else
#define GET_CPU_ID(out) __cpuid(out, 1)
#endif

static bool aes_ni_available(void)
{
    /*
     * Determine if AES is available on this CPU, by checking that
     * both AES itself and SSE4.1 are supported.
     */
    unsigned int CPUInfo[4];
    GET_CPU_ID(CPUInfo);
    return (CPUInfo[2] & (1 << 25)) && (CPUInfo[2] & (1 << 19));
}
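
/*
 * A minimal sketch of the same feature test in isolation, kept
 * compiled out: a standalone program can query CPUID leaf 1 through
 * GCC/Clang's <cpuid.h> and test ECX bit 25 (AES-NI) and bit 19
 * (SSE4.1) directly. Illustrative only.
 */
#if 0
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int a, b, c, d;
    __cpuid(1, a, b, c, d);
    printf("AES-NI: %s, SSE4.1: %s\n",
           (c & (1u << 25)) ? "yes" : "no",
           (c & (1u << 19)) ? "yes" : "no");
    return 0;
}
#endif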

/*
 * Core AES-NI encrypt/decrypt functions, one per length and direction.
 */

#define NI_CIPHER(len, dir, dirlong, repmacro)                          \
    static inline __m128i aes_ni_##len##_##dir(                         \
        __m128i v, const __m128i *keysched)                             \
    {                                                                   \
        v = _mm_xor_si128(v, *keysched++);                              \
        repmacro(v = _mm_aes##dirlong##_si128(v, *keysched++););        \
        return _mm_aes##dirlong##last_si128(v, *keysched);              \
    }

NI_CIPHER(128, e, enc, REP9)
NI_CIPHER(128, d, dec, REP9)
NI_CIPHER(192, e, enc, REP11)
NI_CIPHER(192, d, dec, REP11)
NI_CIPHER(256, e, enc, REP13)
NI_CIPHER(256, d, dec, REP13)
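
/*
 * For illustration, kept compiled out: NI_CIPHER(128, e, enc, REP9)
 * expands to roughly the function below, assuming REP9 (from aes.h)
 * repeats its argument nine times. That gives the expected AES-128
 * shape: one initial AddRoundKey, nine full rounds, and a final round
 * with no MixColumns.
 */
#if 0
static inline __m128i aes_ni_128_e(__m128i v, const __m128i *keysched)
{
    v = _mm_xor_si128(v, *keysched++);           /* initial AddRoundKey */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 1 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 2 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 3 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 4 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 5 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 6 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 7 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 8 */
    v = _mm_aesenc_si128(v, *keysched++);        /* round 9 */
    return _mm_aesenclast_si128(v, *keysched);   /* round 10, no MixColumns */
}
#endif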

/*
 * The main key expansion.
 */
static void aes_ni_key_expand(
    const unsigned char *key, size_t key_words,
    __m128i *keysched_e, __m128i *keysched_d)
{
    size_t rounds = key_words + 6;
    size_t sched_words = (rounds + 1) * 4;

    /*
     * Store the key schedule as 32-bit integers during expansion, so
     * that it's easy to refer back to individual previous words. We
     * collect them into the final __m128i form at the end.
     */
    uint32_t sched[MAXROUNDKEYS * 4];

    unsigned rconpos = 0;

    for (size_t i = 0; i < sched_words; i++) {
        if (i < key_words) {
            sched[i] = GET_32BIT_LSB_FIRST(key + 4 * i);
        } else {
            uint32_t temp = sched[i - 1];

            bool rotate_and_round_constant = (i % key_words == 0);
            bool only_sub = (key_words == 8 && i % 8 == 4);

            if (rotate_and_round_constant) {
                /*
                 * Use AESKEYGENASSIST to compute RotWord(SubWord(temp)):
                 * with temp in element 1 of the input vector, element 1
                 * of the output holds that value (byte rotation and the
                 * bytewise S-box commute, so the order doesn't matter).
                 * The immediate is 0 because we apply the round constant
                 * ourselves from our own table.
                 */
                __m128i v = _mm_setr_epi32(0,temp,0,0);
                v = _mm_aeskeygenassist_si128(v, 0);
                temp = _mm_extract_epi32(v, 1);

                assert(rconpos < lenof(aes_key_setup_round_constants));
                temp ^= aes_key_setup_round_constants[rconpos++];
            } else if (only_sub) {
                /* AES-256 only: element 0 of the output is plain
                 * SubWord(temp), with no rotation. */
                __m128i v = _mm_setr_epi32(0,temp,0,0);
                v = _mm_aeskeygenassist_si128(v, 0);
                temp = _mm_extract_epi32(v, 0);
            }

            sched[i] = sched[i - key_words] ^ temp;
        }
    }

    /*
     * Combine the key schedule words into __m128i vectors and store
     * them in the output context.
     */
    for (size_t round = 0; round <= rounds; round++)
        keysched_e[round] = _mm_setr_epi32(
            sched[4*round  ], sched[4*round+1],
            sched[4*round+2], sched[4*round+3]);

    smemclr(sched, sizeof(sched));

    /*
     * Now prepare the modified keys for the inverse cipher.
     */
    for (size_t eround = 0; eround <= rounds; eround++) {
        size_t dround = rounds - eround;
        __m128i rkey = keysched_e[eround];
        if (eround && dround)            /* neither first nor last */
            rkey = _mm_aesimc_si128(rkey);
        keysched_d[dround] = rkey;
    }
}
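
/*
 * A minimal usage sketch, kept compiled out: expand a 128-bit key
 * (4 words) and encrypt one block with the helpers above. The
 * all-zero key and block are arbitrary placeholders.
 */
#if 0
static void aes_ni_example_one_block(void)
{
    unsigned char key[16] = {0}, block[16] = {0};
    __m128i sched_e[MAXROUNDKEYS], sched_d[MAXROUNDKEYS];

    aes_ni_key_expand(key, 4, sched_e, sched_d);   /* 4 words = AES-128 */

    __m128i p = _mm_loadu_si128((const __m128i *)block);
    __m128i c = aes_ni_128_e(p, sched_e);          /* one ECB encryption */
    _mm_storeu_si128((__m128i *)block, c);

    smemclr(sched_e, sizeof(sched_e));
    smemclr(sched_d, sizeof(sched_d));
}
#endif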

/*
 * Auxiliary routine to increment the 128-bit counter used in SDCTR
 * mode.
 */
static inline __m128i aes_ni_sdctr_increment(__m128i v)
{
    const __m128i ONE  = _mm_setr_epi32(1,0,0,0);
    const __m128i ZERO = _mm_setzero_si128();

    /* Increment the low-order 64 bits of v */
    v = _mm_add_epi64(v, ONE);
    /* Check if they've become zero */
    __m128i cmp = _mm_cmpeq_epi64(v, ZERO);
    /* If so, the low half of cmp is all 1s. Pack that into the high
     * half of addend with zero in the low half. */
    __m128i addend = _mm_unpacklo_epi64(ZERO, cmp);
    /* And subtract that from v, which increments the high 64 bits iff
     * the low 64 wrapped round. */
    v = _mm_sub_epi64(v, addend);

    return v;
}
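
/*
 * A worked example of the carry handling above, kept compiled out:
 * starting from a counter whose low 64 bits are all 1s, a single
 * increment wraps the low half to zero and bumps the high half to 1.
 * Assumes SSE4.1 and a 64-bit target for _mm_extract_epi64.
 */
#if 0
static void aes_ni_sdctr_increment_demo(void)
{
    __m128i v = _mm_set_epi64x(0, -1);   /* high = 0, low = 2^64 - 1 */
    v = aes_ni_sdctr_increment(v);
    assert(_mm_extract_epi64(v, 0) == 0);
    assert(_mm_extract_epi64(v, 1) == 1);
}
#endif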

/*
 * Much simpler auxiliary routine to increment the counter for GCM
 * mode. This only has to increment the low word.
 */
static inline __m128i aes_ni_gcm_increment(__m128i v)
{
    const __m128i ONE = _mm_setr_epi32(1,0,0,0);
    return _mm_add_epi32(v, ONE);
}

/*
 * Auxiliary routine to reverse the byte order of a vector, so that
 * the SDCTR IV can be made big-endian for feeding to the cipher.
 */
static inline __m128i aes_ni_sdctr_reverse(__m128i v)
{
    v = _mm_shuffle_epi8(
        v, _mm_setr_epi8(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0));
    return v;
}
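
/*
 * A small demonstration of the byte reversal, kept compiled out: the
 * shuffle mask above moves byte i to byte 15-i, so an input of
 * 0,1,...,15 comes back as 15,14,...,0.
 */
#if 0
static void aes_ni_sdctr_reverse_demo(void)
{
    unsigned char in[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
    unsigned char out[16];
    __m128i v = _mm_loadu_si128((const __m128i *)in);
    _mm_storeu_si128((__m128i *)out, aes_ni_sdctr_reverse(v));
    /* out now holds 15,14,...,1,0 */
}
#endif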

/*
 * The SSH interface and the cipher modes.
 */

typedef struct aes_ni_context aes_ni_context;
struct aes_ni_context {
    __m128i keysched_e[MAXROUNDKEYS], keysched_d[MAXROUNDKEYS], iv;

    void *pointer_to_free;
    ssh_cipher ciph;
};

static ssh_cipher *aes_ni_new(const ssh_cipheralg *alg)
{
    const struct aes_extra *extra = (const struct aes_extra *)alg->extra;
    if (!check_availability(extra))
        return NULL;

    /*
     * The __m128i variables in the context structure need to be
     * 16-byte aligned, but not all malloc implementations that this
     * code has to work with will guarantee to return a 16-byte
     * aligned pointer. So we over-allocate, manually realign the
     * pointer ourselves, and store the original one inside the
     * context so we know how to free it later.
     */
    void *allocation = smalloc(sizeof(aes_ni_context) + 15);
    uintptr_t alloc_address = (uintptr_t)allocation;
    uintptr_t aligned_address = (alloc_address + 15) & ~15;
    aes_ni_context *ctx = (aes_ni_context *)aligned_address;

    ctx->ciph.vt = alg;
    ctx->pointer_to_free = allocation;
    return &ctx->ciph;
}
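
/*
 * The same over-allocate-and-realign idiom in isolation: a sketch,
 * kept compiled out, using plain malloc from <stdlib.h>. Adding 15
 * and masking with ~15 rounds the address up to the next 16-byte
 * boundary (e.g. ...09 becomes ...10), and the raw pointer is kept
 * so it can be passed to free() later.
 */
#if 0
static void *alloc_aligned16(size_t size, void **raw_out)
{
    void *raw = malloc(size + 15);                    /* worst-case padding */
    uintptr_t aligned = ((uintptr_t)raw + 15) & ~(uintptr_t)15;
    *raw_out = raw;                                   /* pass this to free() */
    return (void *)aligned;
}
#endif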

static void aes_ni_free(ssh_cipher *ciph)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);
    void *allocation = ctx->pointer_to_free;
    smemclr(ctx, sizeof(*ctx));
    sfree(allocation);
}

static void aes_ni_setkey(ssh_cipher *ciph, const void *vkey)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);
    const unsigned char *key = (const unsigned char *)vkey;

    aes_ni_key_expand(key, ctx->ciph.vt->real_keybits / 32,
                      ctx->keysched_e, ctx->keysched_d);
}

static void aes_ni_setiv_cbc(ssh_cipher *ciph, const void *iv)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);
    ctx->iv = _mm_loadu_si128(iv);
}

static void aes_ni_setiv_sdctr(ssh_cipher *ciph, const void *iv)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);
    __m128i counter = _mm_loadu_si128(iv);
    ctx->iv = aes_ni_sdctr_reverse(counter);
}

static void aes_ni_setiv_gcm(ssh_cipher *ciph, const void *iv)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);
    __m128i counter = _mm_loadu_si128(iv);
    ctx->iv = aes_ni_sdctr_reverse(counter);
    /* Set the low word, the per-block counter, to its starting value of 1 */
    ctx->iv = _mm_insert_epi32(ctx->iv, 1, 0);
}

static void aes_ni_next_message_gcm(ssh_cipher *ciph)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);
    /* Reassemble the 64-bit per-message counter from words 2 and 1 of
     * the stored (byte-reversed) IV, leaving word 3, the fixed field,
     * alone. */
    uint32_t fixed = _mm_extract_epi32(ctx->iv, 3);
    uint64_t msg_counter = _mm_extract_epi32(ctx->iv, 2);
    msg_counter <<= 32;
    msg_counter |= (uint32_t)_mm_extract_epi32(ctx->iv, 1);
    msg_counter++;
    /* Rebuild the IV with the incremented message counter and the
     * per-block counter reset to 1. */
    ctx->iv = _mm_set_epi32(fixed, msg_counter >> 32, msg_counter, 1);
}

typedef __m128i (*aes_ni_fn)(__m128i v, const __m128i *keysched);

static inline void aes_cbc_ni_encrypt(
    ssh_cipher *ciph, void *vblk, int blklen, aes_ni_fn encrypt)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);

    for (uint8_t *blk = (uint8_t *)vblk, *finish = blk + blklen;
         blk < finish; blk += 16) {
        __m128i plaintext = _mm_loadu_si128((const __m128i *)blk);
        __m128i cipher_input = _mm_xor_si128(plaintext, ctx->iv);
        __m128i ciphertext = encrypt(cipher_input, ctx->keysched_e);
        _mm_storeu_si128((__m128i *)blk, ciphertext);
        ctx->iv = ciphertext;
    }
}

static inline void aes_cbc_ni_decrypt(
    ssh_cipher *ciph, void *vblk, int blklen, aes_ni_fn decrypt)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);

    for (uint8_t *blk = (uint8_t *)vblk, *finish = blk + blklen;
         blk < finish; blk += 16) {
        __m128i ciphertext = _mm_loadu_si128((const __m128i *)blk);
        __m128i decrypted = decrypt(ciphertext, ctx->keysched_d);
        __m128i plaintext = _mm_xor_si128(decrypted, ctx->iv);
        _mm_storeu_si128((__m128i *)blk, plaintext);
        ctx->iv = ciphertext;
    }
}

static inline void aes_sdctr_ni(
    ssh_cipher *ciph, void *vblk, int blklen, aes_ni_fn encrypt)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);

    for (uint8_t *blk = (uint8_t *)vblk, *finish = blk + blklen;
         blk < finish; blk += 16) {
        __m128i counter = aes_ni_sdctr_reverse(ctx->iv);
        __m128i keystream = encrypt(counter, ctx->keysched_e);
        __m128i input = _mm_loadu_si128((const __m128i *)blk);
        __m128i output = _mm_xor_si128(input, keystream);
        _mm_storeu_si128((__m128i *)blk, output);
        ctx->iv = aes_ni_sdctr_increment(ctx->iv);
    }
}

static inline void aes_encrypt_ecb_block_ni(
    ssh_cipher *ciph, void *blk, aes_ni_fn encrypt)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);

    __m128i plaintext = _mm_loadu_si128(blk);
    __m128i ciphertext = encrypt(plaintext, ctx->keysched_e);
    _mm_storeu_si128(blk, ciphertext);
}

static inline void aes_gcm_ni(
    ssh_cipher *ciph, void *vblk, int blklen, aes_ni_fn encrypt)
{
    aes_ni_context *ctx = container_of(ciph, aes_ni_context, ciph);

    for (uint8_t *blk = (uint8_t *)vblk, *finish = blk + blklen;
         blk < finish; blk += 16) {
        __m128i counter = aes_ni_sdctr_reverse(ctx->iv);
        __m128i keystream = encrypt(counter, ctx->keysched_e);
        __m128i input = _mm_loadu_si128((const __m128i *)blk);
        __m128i output = _mm_xor_si128(input, keystream);
        _mm_storeu_si128((__m128i *)blk, output);
        ctx->iv = aes_ni_gcm_increment(ctx->iv);
    }
}
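
/*
 * A rough sketch, kept compiled out, of how the GCM helpers above fit
 * together for one message. It assumes aes_ni_setkey has already run,
 * that datalen is a multiple of 16, and it covers only the CTR-style
 * encryption; the GHASH/MAC side of GCM lives elsewhere.
 */
#if 0
static void aes_ni_gcm_flow_sketch(ssh_cipher *ciph, const void *iv16,
                                   void *data, int datalen)
{
    aes_ni_setiv_gcm(ciph, iv16);                     /* set the initial IV */
    aes_gcm_ni(ciph, data, datalen, aes_ni_256_e);    /* encrypt one message */
    aes_ni_next_message_gcm(ciph);                    /* ready for the next */
}
#endif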

#define NI_ENC_DEC(len)                                                 \
    static void aes##len##_ni_cbc_encrypt(                              \
        ssh_cipher *ciph, void *vblk, int blklen)                       \
    { aes_cbc_ni_encrypt(ciph, vblk, blklen, aes_ni_##len##_e); }       \
    static void aes##len##_ni_cbc_decrypt(                              \
        ssh_cipher *ciph, void *vblk, int blklen)                       \
    { aes_cbc_ni_decrypt(ciph, vblk, blklen, aes_ni_##len##_d); }       \
    static void aes##len##_ni_sdctr(                                    \
        ssh_cipher *ciph, void *vblk, int blklen)                       \
    { aes_sdctr_ni(ciph, vblk, blklen, aes_ni_##len##_e); }             \
    static void aes##len##_ni_gcm(                                      \
        ssh_cipher *ciph, void *vblk, int blklen)                       \
    { aes_gcm_ni(ciph, vblk, blklen, aes_ni_##len##_e); }               \
    static void aes##len##_ni_encrypt_ecb_block(                        \
        ssh_cipher *ciph, void *vblk)                                   \
    { aes_encrypt_ecb_block_ni(ciph, vblk, aes_ni_##len##_e); }

NI_ENC_DEC(128)
NI_ENC_DEC(192)
NI_ENC_DEC(256)

AES_EXTRA(_ni);
AES_ALL_VTABLES(_ni, "AES-NI accelerated");