aesni-intel_glue.c

  1. /*
  2. * Support for Intel AES-NI instructions. This file contains glue
  3. * code, the real AES implementation is in aesni-intel_asm.S.
  4. *
  5. * Copyright (C) 2008, Intel Corp.
  6. * Author: Huang Ying <ying.huang@intel.com>
  7. *
  8. * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
  9. * interface for 64-bit kernels.
  10. * Authors: Adrian Hoban <adrian.hoban@intel.com>
  11. * Gabriele Paoloni <gabriele.paoloni@intel.com>
  12. * Tadeusz Struk (tadeusz.struk@intel.com)
  13. * Aidan O'Mahony (aidan.o.mahony@intel.com)
  14. * Copyright (c) 2010, Intel Corporation.
  15. *
  16. * This program is free software; you can redistribute it and/or modify
  17. * it under the terms of the GNU General Public License as published by
  18. * the Free Software Foundation; either version 2 of the License, or
  19. * (at your option) any later version.
  20. */
  21. #include <linux/hardirq.h>
  22. #include <linux/types.h>
  23. #include <linux/crypto.h>
  24. #include <linux/module.h>
  25. #include <linux/err.h>
  26. #include <crypto/algapi.h>
  27. #include <crypto/aes.h>
  28. #include <crypto/cryptd.h>
  29. #include <crypto/ctr.h>
  30. #include <asm/cpu_device_id.h>
  31. #include <asm/i387.h>
  32. #include <asm/aes.h>
  33. #include <crypto/scatterwalk.h>
  34. #include <crypto/internal/aead.h>
  35. #include <linux/workqueue.h>
  36. #include <linux/spinlock.h>
  37. #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
  38. #define HAS_CTR
  39. #endif
  40. #if defined(CONFIG_CRYPTO_LRW) || defined(CONFIG_CRYPTO_LRW_MODULE)
  41. #define HAS_LRW
  42. #endif
  43. #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
  44. #define HAS_PCBC
  45. #endif
  46. #if defined(CONFIG_CRYPTO_XTS) || defined(CONFIG_CRYPTO_XTS_MODULE)
  47. #define HAS_XTS
  48. #endif
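/* The optional modes below are only wrapped when the corresponding generic
 * template is available, either built in or as a module. */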
  49. struct async_aes_ctx {
  50. struct cryptd_ablkcipher *cryptd_tfm;
  51. };
  52. /* This data is stored at the end of the crypto_tfm struct.
  53. * It is a per-session data storage location.
  54. * It needs to be 16-byte aligned.
  55. */
  56. struct aesni_rfc4106_gcm_ctx {
  57. u8 hash_subkey[16];
  58. struct crypto_aes_ctx aes_key_expanded;
  59. u8 nonce[4];
  60. struct cryptd_aead *cryptd_tfm;
  61. };
  62. struct aesni_gcm_set_hash_subkey_result {
  63. int err;
  64. struct completion completion;
  65. };
  66. struct aesni_hash_subkey_req_data {
  67. u8 iv[16];
  68. struct aesni_gcm_set_hash_subkey_result result;
  69. struct scatterlist sg;
  70. };
  71. #define AESNI_ALIGN (16)
  72. #define AES_BLOCK_MASK (~(AES_BLOCK_SIZE-1))
  73. #define RFC4106_HASH_SUBKEY_SIZE 16
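/* AES_BLOCK_MASK rounds a byte count down to a whole number of 16-byte AES
 * blocks: e.g. 100 & AES_BLOCK_MASK == 96, leaving 100 & (AES_BLOCK_SIZE - 1)
 * == 4 tail bytes for the caller to handle. */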
  74. asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
  75. unsigned int key_len);
  76. asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
  77. const u8 *in);
  78. asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
  79. const u8 *in);
  80. asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
  81. const u8 *in, unsigned int len);
  82. asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
  83. const u8 *in, unsigned int len);
  84. asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
  85. const u8 *in, unsigned int len, u8 *iv);
  86. asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
  87. const u8 *in, unsigned int len, u8 *iv);
  88. int crypto_fpu_init(void);
  89. void crypto_fpu_exit(void);
  90. #ifdef CONFIG_X86_64
  91. asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
  92. const u8 *in, unsigned int len, u8 *iv);
  93. /* asmlinkage void aesni_gcm_enc()
  94. * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
  95. * u8 *out, Ciphertext output. Encrypt in-place is allowed.
  96. * const u8 *in, Plaintext input
  97. * unsigned long plaintext_len, Length of data in bytes for encryption.
  98. * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
  99. * concatenated with 8 byte Initialisation Vector (from IPSec ESP
  100. * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
  101. * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
  102. * const u8 *aad, Additional Authentication Data (AAD)
  103. * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
  104. * is going to be 8 or 12 bytes
  105. * u8 *auth_tag, Authenticated Tag output.
  106. * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
  107. * Valid values are 16 (most likely), 12 or 8.
  108. */
  109. asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
  110. const u8 *in, unsigned long plaintext_len, u8 *iv,
  111. u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
  112. u8 *auth_tag, unsigned long auth_tag_len);
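/*
 * Illustrative sketch only (kept out of the build with "#if 0"): how the glue
 * code later in this file drives aesni_gcm_enc() for RFC4106. It assumes a
 * linear, 16-byte aligned buffer layout and an already expanded key schedule;
 * the helper name and its parameters are hypothetical.
 */
#if 0
static void example_rfc4106_gcm_enc(struct aesni_rfc4106_gcm_ctx *ctx,
                                    u8 *dst, const u8 *src, unsigned long len,
                                    const u8 *esp_iv, const u8 *aad,
                                    unsigned long aad_len, u8 *tag,
                                    unsigned long tag_len)
{
        /* Pre-counter block j0: 4-byte salt || 8-byte explicit IV || 0x00000001 */
        u8 iv[16] __aligned(16);

        memcpy(iv, ctx->nonce, 4);
        memcpy(iv + 4, esp_iv, 8);
        *(__be32 *)(iv + 12) = cpu_to_be32(1);

        /* The SSE-based asm routine may only run with the FPU held. */
        kernel_fpu_begin();
        aesni_gcm_enc(&ctx->aes_key_expanded, dst, src, len, iv,
                      ctx->hash_subkey, aad, aad_len, tag, tag_len);
        kernel_fpu_end();
}
#endif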
  113. /* asmlinkage void aesni_gcm_dec()
  114. * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
  115. * u8 *out, Plaintext output. Decrypt in-place is allowed.
  116. * const u8 *in, Ciphertext input
  117. * unsigned long ciphertext_len, Length of data in bytes for decryption.
  118. * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
  119. * concatenated with 8 byte Initialisation Vector (from IPSec ESP
  120. * Payload) concatenated with 0x00000001. 16-byte aligned pointer.
  121. * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
  122. * const u8 *aad, Additional Authentication Data (AAD)
  123. * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
  124. * to be 8 or 12 bytes
  125. * u8 *auth_tag, Authenticated Tag output.
  126. * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
  127. * Valid values are 16 (most likely), 12 or 8.
  128. */
  129. asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
  130. const u8 *in, unsigned long ciphertext_len, u8 *iv,
  131. u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
  132. u8 *auth_tag, unsigned long auth_tag_len);
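/* Note: aesni_gcm_dec() writes the computed tag to auth_tag; the caller is
 * responsible for comparing it against the tag received with the ciphertext
 * (see __driver_rfc4106_decrypt() below). */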
  133. static inline struct
  134. aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
  135. {
  136. return
  137. (struct aesni_rfc4106_gcm_ctx *)
  138. PTR_ALIGN((u8 *)
  139. crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
  140. }
  141. #endif
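/* Return a 16-byte aligned view of the raw tfm context; when the crypto API
 * already guarantees at least AESNI_ALIGN alignment, the pointer is used
 * as-is. */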
  142. static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
  143. {
  144. unsigned long addr = (unsigned long)raw_ctx;
  145. unsigned long align = AESNI_ALIGN;
  146. if (align <= crypto_tfm_ctx_alignment())
  147. align = 1;
  148. return (struct crypto_aes_ctx *)ALIGN(addr, align);
  149. }
  150. static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
  151. const u8 *in_key, unsigned int key_len)
  152. {
  153. struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
  154. u32 *flags = &tfm->crt_flags;
  155. int err;
  156. if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
  157. key_len != AES_KEYSIZE_256) {
  158. *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
  159. return -EINVAL;
  160. }
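/* AES-NI uses SSE registers, so it may only run where the FPU is usable;
 * otherwise fall back to the generic C key expansion. */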
  161. if (!irq_fpu_usable())
  162. err = crypto_aes_expand_key(ctx, in_key, key_len);
  163. else {
  164. kernel_fpu_begin();
  165. err = aesni_set_key(ctx, in_key, key_len);
  166. kernel_fpu_end();
  167. }
  168. return err;
  169. }
  170. static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
  171. unsigned int key_len)
  172. {
  173. return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
  174. }
  175. static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  176. {
  177. struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
  178. if (!irq_fpu_usable())
  179. crypto_aes_encrypt_x86(ctx, dst, src);
  180. else {
  181. kernel_fpu_begin();
  182. aesni_enc(ctx, dst, src);
  183. kernel_fpu_end();
  184. }
  185. }
  186. static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  187. {
  188. struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
  189. if (!irq_fpu_usable())
  190. crypto_aes_decrypt_x86(ctx, dst, src);
  191. else {
  192. kernel_fpu_begin();
  193. aesni_dec(ctx, dst, src);
  194. kernel_fpu_end();
  195. }
  196. }
  197. static struct crypto_alg aesni_alg = {
  198. .cra_name = "aes",
  199. .cra_driver_name = "aes-aesni",
  200. .cra_priority = 300,
  201. .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
  202. .cra_blocksize = AES_BLOCK_SIZE,
  203. .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
  204. .cra_alignmask = 0,
  205. .cra_module = THIS_MODULE,
  206. .cra_list = LIST_HEAD_INIT(aesni_alg.cra_list),
  207. .cra_u = {
  208. .cipher = {
  209. .cia_min_keysize = AES_MIN_KEY_SIZE,
  210. .cia_max_keysize = AES_MAX_KEY_SIZE,
  211. .cia_setkey = aes_set_key,
  212. .cia_encrypt = aes_encrypt,
  213. .cia_decrypt = aes_decrypt
  214. }
  215. }
  216. };
  217. static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  218. {
  219. struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
  220. aesni_enc(ctx, dst, src);
  221. }
  222. static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  223. {
  224. struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
  225. aesni_dec(ctx, dst, src);
  226. }
  227. static struct crypto_alg __aesni_alg = {
  228. .cra_name = "__aes-aesni",
  229. .cra_driver_name = "__driver-aes-aesni",
  230. .cra_priority = 0,
  231. .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
  232. .cra_blocksize = AES_BLOCK_SIZE,
  233. .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
  234. .cra_alignmask = 0,
  235. .cra_module = THIS_MODULE,
  236. .cra_list = LIST_HEAD_INIT(__aesni_alg.cra_list),
  237. .cra_u = {
  238. .cipher = {
  239. .cia_min_keysize = AES_MIN_KEY_SIZE,
  240. .cia_max_keysize = AES_MAX_KEY_SIZE,
  241. .cia_setkey = aes_set_key,
  242. .cia_encrypt = __aes_encrypt,
  243. .cia_decrypt = __aes_decrypt
  244. }
  245. }
  246. };
  247. static int ecb_encrypt(struct blkcipher_desc *desc,
  248. struct scatterlist *dst, struct scatterlist *src,
  249. unsigned int nbytes)
  250. {
  251. struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
  252. struct blkcipher_walk walk;
  253. int err;
  254. blkcipher_walk_init(&walk, dst, src, nbytes);
  255. err = blkcipher_walk_virt(desc, &walk);
  256. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  257. kernel_fpu_begin();
  258. while ((nbytes = walk.nbytes)) {
  259. aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
  260. nbytes & AES_BLOCK_MASK);
  261. nbytes &= AES_BLOCK_SIZE - 1;
  262. err = blkcipher_walk_done(desc, &walk, nbytes);
  263. }
  264. kernel_fpu_end();
  265. return err;
  266. }
  267. static int ecb_decrypt(struct blkcipher_desc *desc,
  268. struct scatterlist *dst, struct scatterlist *src,
  269. unsigned int nbytes)
  270. {
  271. struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
  272. struct blkcipher_walk walk;
  273. int err;
  274. blkcipher_walk_init(&walk, dst, src, nbytes);
  275. err = blkcipher_walk_virt(desc, &walk);
  276. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  277. kernel_fpu_begin();
  278. while ((nbytes = walk.nbytes)) {
  279. aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
  280. nbytes & AES_BLOCK_MASK);
  281. nbytes &= AES_BLOCK_SIZE - 1;
  282. err = blkcipher_walk_done(desc, &walk, nbytes);
  283. }
  284. kernel_fpu_end();
  285. return err;
  286. }
  287. static struct crypto_alg blk_ecb_alg = {
  288. .cra_name = "__ecb-aes-aesni",
  289. .cra_driver_name = "__driver-ecb-aes-aesni",
  290. .cra_priority = 0,
  291. .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
  292. .cra_blocksize = AES_BLOCK_SIZE,
  293. .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
  294. .cra_alignmask = 0,
  295. .cra_type = &crypto_blkcipher_type,
  296. .cra_module = THIS_MODULE,
  297. .cra_list = LIST_HEAD_INIT(blk_ecb_alg.cra_list),
  298. .cra_u = {
  299. .blkcipher = {
  300. .min_keysize = AES_MIN_KEY_SIZE,
  301. .max_keysize = AES_MAX_KEY_SIZE,
  302. .setkey = aes_set_key,
  303. .encrypt = ecb_encrypt,
  304. .decrypt = ecb_decrypt,
  305. },
  306. },
  307. };
  308. static int cbc_encrypt(struct blkcipher_desc *desc,
  309. struct scatterlist *dst, struct scatterlist *src,
  310. unsigned int nbytes)
  311. {
  312. struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
  313. struct blkcipher_walk walk;
  314. int err;
  315. blkcipher_walk_init(&walk, dst, src, nbytes);
  316. err = blkcipher_walk_virt(desc, &walk);
  317. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  318. kernel_fpu_begin();
  319. while ((nbytes = walk.nbytes)) {
  320. aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
  321. nbytes & AES_BLOCK_MASK, walk.iv);
  322. nbytes &= AES_BLOCK_SIZE - 1;
  323. err = blkcipher_walk_done(desc, &walk, nbytes);
  324. }
  325. kernel_fpu_end();
  326. return err;
  327. }
  328. static int cbc_decrypt(struct blkcipher_desc *desc,
  329. struct scatterlist *dst, struct scatterlist *src,
  330. unsigned int nbytes)
  331. {
  332. struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
  333. struct blkcipher_walk walk;
  334. int err;
  335. blkcipher_walk_init(&walk, dst, src, nbytes);
  336. err = blkcipher_walk_virt(desc, &walk);
  337. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  338. kernel_fpu_begin();
  339. while ((nbytes = walk.nbytes)) {
  340. aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
  341. nbytes & AES_BLOCK_MASK, walk.iv);
  342. nbytes &= AES_BLOCK_SIZE - 1;
  343. err = blkcipher_walk_done(desc, &walk, nbytes);
  344. }
  345. kernel_fpu_end();
  346. return err;
  347. }
  348. static struct crypto_alg blk_cbc_alg = {
  349. .cra_name = "__cbc-aes-aesni",
  350. .cra_driver_name = "__driver-cbc-aes-aesni",
  351. .cra_priority = 0,
  352. .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
  353. .cra_blocksize = AES_BLOCK_SIZE,
  354. .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
  355. .cra_alignmask = 0,
  356. .cra_type = &crypto_blkcipher_type,
  357. .cra_module = THIS_MODULE,
  358. .cra_list = LIST_HEAD_INIT(blk_cbc_alg.cra_list),
  359. .cra_u = {
  360. .blkcipher = {
  361. .min_keysize = AES_MIN_KEY_SIZE,
  362. .max_keysize = AES_MAX_KEY_SIZE,
  363. .setkey = aes_set_key,
  364. .encrypt = cbc_encrypt,
  365. .decrypt = cbc_decrypt,
  366. },
  367. },
  368. };
  369. #ifdef CONFIG_X86_64
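/* Encrypt the counter block to produce one keystream block and XOR just the
 * remaining tail bytes, so CTR requests need not be a multiple of the AES
 * block size. */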
  370. static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
  371. struct blkcipher_walk *walk)
  372. {
  373. u8 *ctrblk = walk->iv;
  374. u8 keystream[AES_BLOCK_SIZE];
  375. u8 *src = walk->src.virt.addr;
  376. u8 *dst = walk->dst.virt.addr;
  377. unsigned int nbytes = walk->nbytes;
  378. aesni_enc(ctx, keystream, ctrblk);
  379. crypto_xor(keystream, src, nbytes);
  380. memcpy(dst, keystream, nbytes);
  381. crypto_inc(ctrblk, AES_BLOCK_SIZE);
  382. }
  383. static int ctr_crypt(struct blkcipher_desc *desc,
  384. struct scatterlist *dst, struct scatterlist *src,
  385. unsigned int nbytes)
  386. {
  387. struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm));
  388. struct blkcipher_walk walk;
  389. int err;
  390. blkcipher_walk_init(&walk, dst, src, nbytes);
  391. err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
  392. desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
  393. kernel_fpu_begin();
  394. while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
  395. aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
  396. nbytes & AES_BLOCK_MASK, walk.iv);
  397. nbytes &= AES_BLOCK_SIZE - 1;
  398. err = blkcipher_walk_done(desc, &walk, nbytes);
  399. }
  400. if (walk.nbytes) {
  401. ctr_crypt_final(ctx, &walk);
  402. err = blkcipher_walk_done(desc, &walk, 0);
  403. }
  404. kernel_fpu_end();
  405. return err;
  406. }
  407. static struct crypto_alg blk_ctr_alg = {
  408. .cra_name = "__ctr-aes-aesni",
  409. .cra_driver_name = "__driver-ctr-aes-aesni",
  410. .cra_priority = 0,
  411. .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
  412. .cra_blocksize = 1,
  413. .cra_ctxsize = sizeof(struct crypto_aes_ctx)+AESNI_ALIGN-1,
  414. .cra_alignmask = 0,
  415. .cra_type = &crypto_blkcipher_type,
  416. .cra_module = THIS_MODULE,
  417. .cra_list = LIST_HEAD_INIT(blk_ctr_alg.cra_list),
  418. .cra_u = {
  419. .blkcipher = {
  420. .min_keysize = AES_MIN_KEY_SIZE,
  421. .max_keysize = AES_MAX_KEY_SIZE,
  422. .ivsize = AES_BLOCK_SIZE,
  423. .setkey = aes_set_key,
  424. .encrypt = ctr_crypt,
  425. .decrypt = ctr_crypt,
  426. },
  427. },
  428. };
  429. #endif
  430. static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
  431. unsigned int key_len)
  432. {
  433. struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  434. struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
  435. int err;
  436. crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
  437. crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
  438. & CRYPTO_TFM_REQ_MASK);
  439. err = crypto_ablkcipher_setkey(child, key, key_len);
  440. crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
  441. & CRYPTO_TFM_RES_MASK);
  442. return err;
  443. }
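/* If the FPU cannot be used in the current context, hand the request to the
 * cryptd-backed async tfm (processed later from a workqueue); otherwise run
 * the underlying blkcipher synchronously. */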
  444. static int ablk_encrypt(struct ablkcipher_request *req)
  445. {
  446. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  447. struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  448. if (!irq_fpu_usable()) {
  449. struct ablkcipher_request *cryptd_req =
  450. ablkcipher_request_ctx(req);
  451. memcpy(cryptd_req, req, sizeof(*req));
  452. ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
  453. return crypto_ablkcipher_encrypt(cryptd_req);
  454. } else {
  455. struct blkcipher_desc desc;
  456. desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
  457. desc.info = req->info;
  458. desc.flags = 0;
  459. return crypto_blkcipher_crt(desc.tfm)->encrypt(
  460. &desc, req->dst, req->src, req->nbytes);
  461. }
  462. }
  463. static int ablk_decrypt(struct ablkcipher_request *req)
  464. {
  465. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  466. struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
  467. if (!irq_fpu_usable()) {
  468. struct ablkcipher_request *cryptd_req =
  469. ablkcipher_request_ctx(req);
  470. memcpy(cryptd_req, req, sizeof(*req));
  471. ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
  472. return crypto_ablkcipher_decrypt(cryptd_req);
  473. } else {
  474. struct blkcipher_desc desc;
  475. desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
  476. desc.info = req->info;
  477. desc.flags = 0;
  478. return crypto_blkcipher_crt(desc.tfm)->decrypt(
  479. &desc, req->dst, req->src, req->nbytes);
  480. }
  481. }
  482. static void ablk_exit(struct crypto_tfm *tfm)
  483. {
  484. struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
  485. cryptd_free_ablkcipher(ctx->cryptd_tfm);
  486. }
  487. static void ablk_init_common(struct crypto_tfm *tfm,
  488. struct cryptd_ablkcipher *cryptd_tfm)
  489. {
  490. struct async_aes_ctx *ctx = crypto_tfm_ctx(tfm);
  491. ctx->cryptd_tfm = cryptd_tfm;
  492. tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
  493. crypto_ablkcipher_reqsize(&cryptd_tfm->base);
  494. }
  495. static int ablk_ecb_init(struct crypto_tfm *tfm)
  496. {
  497. struct cryptd_ablkcipher *cryptd_tfm;
  498. cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ecb-aes-aesni", 0, 0);
  499. if (IS_ERR(cryptd_tfm))
  500. return PTR_ERR(cryptd_tfm);
  501. ablk_init_common(tfm, cryptd_tfm);
  502. return 0;
  503. }
  504. static struct crypto_alg ablk_ecb_alg = {
  505. .cra_name = "ecb(aes)",
  506. .cra_driver_name = "ecb-aes-aesni",
  507. .cra_priority = 400,
  508. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
  509. .cra_blocksize = AES_BLOCK_SIZE,
  510. .cra_ctxsize = sizeof(struct async_aes_ctx),
  511. .cra_alignmask = 0,
  512. .cra_type = &crypto_ablkcipher_type,
  513. .cra_module = THIS_MODULE,
  514. .cra_list = LIST_HEAD_INIT(ablk_ecb_alg.cra_list),
  515. .cra_init = ablk_ecb_init,
  516. .cra_exit = ablk_exit,
  517. .cra_u = {
  518. .ablkcipher = {
  519. .min_keysize = AES_MIN_KEY_SIZE,
  520. .max_keysize = AES_MAX_KEY_SIZE,
  521. .setkey = ablk_set_key,
  522. .encrypt = ablk_encrypt,
  523. .decrypt = ablk_decrypt,
  524. },
  525. },
  526. };
  527. static int ablk_cbc_init(struct crypto_tfm *tfm)
  528. {
  529. struct cryptd_ablkcipher *cryptd_tfm;
  530. cryptd_tfm = cryptd_alloc_ablkcipher("__driver-cbc-aes-aesni", 0, 0);
  531. if (IS_ERR(cryptd_tfm))
  532. return PTR_ERR(cryptd_tfm);
  533. ablk_init_common(tfm, cryptd_tfm);
  534. return 0;
  535. }
  536. static struct crypto_alg ablk_cbc_alg = {
  537. .cra_name = "cbc(aes)",
  538. .cra_driver_name = "cbc-aes-aesni",
  539. .cra_priority = 400,
  540. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
  541. .cra_blocksize = AES_BLOCK_SIZE,
  542. .cra_ctxsize = sizeof(struct async_aes_ctx),
  543. .cra_alignmask = 0,
  544. .cra_type = &crypto_ablkcipher_type,
  545. .cra_module = THIS_MODULE,
  546. .cra_list = LIST_HEAD_INIT(ablk_cbc_alg.cra_list),
  547. .cra_init = ablk_cbc_init,
  548. .cra_exit = ablk_exit,
  549. .cra_u = {
  550. .ablkcipher = {
  551. .min_keysize = AES_MIN_KEY_SIZE,
  552. .max_keysize = AES_MAX_KEY_SIZE,
  553. .ivsize = AES_BLOCK_SIZE,
  554. .setkey = ablk_set_key,
  555. .encrypt = ablk_encrypt,
  556. .decrypt = ablk_decrypt,
  557. },
  558. },
  559. };
  560. #ifdef CONFIG_X86_64
  561. static int ablk_ctr_init(struct crypto_tfm *tfm)
  562. {
  563. struct cryptd_ablkcipher *cryptd_tfm;
  564. cryptd_tfm = cryptd_alloc_ablkcipher("__driver-ctr-aes-aesni", 0, 0);
  565. if (IS_ERR(cryptd_tfm))
  566. return PTR_ERR(cryptd_tfm);
  567. ablk_init_common(tfm, cryptd_tfm);
  568. return 0;
  569. }
  570. static struct crypto_alg ablk_ctr_alg = {
  571. .cra_name = "ctr(aes)",
  572. .cra_driver_name = "ctr-aes-aesni",
  573. .cra_priority = 400,
  574. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
  575. .cra_blocksize = 1,
  576. .cra_ctxsize = sizeof(struct async_aes_ctx),
  577. .cra_alignmask = 0,
  578. .cra_type = &crypto_ablkcipher_type,
  579. .cra_module = THIS_MODULE,
  580. .cra_list = LIST_HEAD_INIT(ablk_ctr_alg.cra_list),
  581. .cra_init = ablk_ctr_init,
  582. .cra_exit = ablk_exit,
  583. .cra_u = {
  584. .ablkcipher = {
  585. .min_keysize = AES_MIN_KEY_SIZE,
  586. .max_keysize = AES_MAX_KEY_SIZE,
  587. .ivsize = AES_BLOCK_SIZE,
  588. .setkey = ablk_set_key,
  589. .encrypt = ablk_encrypt,
  590. .decrypt = ablk_encrypt,
  591. .geniv = "chainiv",
  592. },
  593. },
  594. };
  595. #ifdef HAS_CTR
  596. static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
  597. {
  598. struct cryptd_ablkcipher *cryptd_tfm;
  599. cryptd_tfm = cryptd_alloc_ablkcipher(
  600. "rfc3686(__driver-ctr-aes-aesni)", 0, 0);
  601. if (IS_ERR(cryptd_tfm))
  602. return PTR_ERR(cryptd_tfm);
  603. ablk_init_common(tfm, cryptd_tfm);
  604. return 0;
  605. }
  606. static struct crypto_alg ablk_rfc3686_ctr_alg = {
  607. .cra_name = "rfc3686(ctr(aes))",
  608. .cra_driver_name = "rfc3686-ctr-aes-aesni",
  609. .cra_priority = 400,
  610. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
  611. .cra_blocksize = 1,
  612. .cra_ctxsize = sizeof(struct async_aes_ctx),
  613. .cra_alignmask = 0,
  614. .cra_type = &crypto_ablkcipher_type,
  615. .cra_module = THIS_MODULE,
  616. .cra_list = LIST_HEAD_INIT(ablk_rfc3686_ctr_alg.cra_list),
  617. .cra_init = ablk_rfc3686_ctr_init,
  618. .cra_exit = ablk_exit,
  619. .cra_u = {
  620. .ablkcipher = {
  621. .min_keysize = AES_MIN_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
  622. .max_keysize = AES_MAX_KEY_SIZE+CTR_RFC3686_NONCE_SIZE,
  623. .ivsize = CTR_RFC3686_IV_SIZE,
  624. .setkey = ablk_set_key,
  625. .encrypt = ablk_encrypt,
  626. .decrypt = ablk_decrypt,
  627. .geniv = "seqiv",
  628. },
  629. },
  630. };
  631. #endif
  632. #endif
  633. #ifdef HAS_LRW
  634. static int ablk_lrw_init(struct crypto_tfm *tfm)
  635. {
  636. struct cryptd_ablkcipher *cryptd_tfm;
  637. cryptd_tfm = cryptd_alloc_ablkcipher("fpu(lrw(__driver-aes-aesni))",
  638. 0, 0);
  639. if (IS_ERR(cryptd_tfm))
  640. return PTR_ERR(cryptd_tfm);
  641. ablk_init_common(tfm, cryptd_tfm);
  642. return 0;
  643. }
  644. static struct crypto_alg ablk_lrw_alg = {
  645. .cra_name = "lrw(aes)",
  646. .cra_driver_name = "lrw-aes-aesni",
  647. .cra_priority = 400,
  648. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
  649. .cra_blocksize = AES_BLOCK_SIZE,
  650. .cra_ctxsize = sizeof(struct async_aes_ctx),
  651. .cra_alignmask = 0,
  652. .cra_type = &crypto_ablkcipher_type,
  653. .cra_module = THIS_MODULE,
  654. .cra_list = LIST_HEAD_INIT(ablk_lrw_alg.cra_list),
  655. .cra_init = ablk_lrw_init,
  656. .cra_exit = ablk_exit,
  657. .cra_u = {
  658. .ablkcipher = {
  659. .min_keysize = AES_MIN_KEY_SIZE + AES_BLOCK_SIZE,
  660. .max_keysize = AES_MAX_KEY_SIZE + AES_BLOCK_SIZE,
  661. .ivsize = AES_BLOCK_SIZE,
  662. .setkey = ablk_set_key,
  663. .encrypt = ablk_encrypt,
  664. .decrypt = ablk_decrypt,
  665. },
  666. },
  667. };
  668. #endif
  669. #ifdef HAS_PCBC
  670. static int ablk_pcbc_init(struct crypto_tfm *tfm)
  671. {
  672. struct cryptd_ablkcipher *cryptd_tfm;
  673. cryptd_tfm = cryptd_alloc_ablkcipher("fpu(pcbc(__driver-aes-aesni))",
  674. 0, 0);
  675. if (IS_ERR(cryptd_tfm))
  676. return PTR_ERR(cryptd_tfm);
  677. ablk_init_common(tfm, cryptd_tfm);
  678. return 0;
  679. }
  680. static struct crypto_alg ablk_pcbc_alg = {
  681. .cra_name = "pcbc(aes)",
  682. .cra_driver_name = "pcbc-aes-aesni",
  683. .cra_priority = 400,
  684. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
  685. .cra_blocksize = AES_BLOCK_SIZE,
  686. .cra_ctxsize = sizeof(struct async_aes_ctx),
  687. .cra_alignmask = 0,
  688. .cra_type = &crypto_ablkcipher_type,
  689. .cra_module = THIS_MODULE,
  690. .cra_list = LIST_HEAD_INIT(ablk_pcbc_alg.cra_list),
  691. .cra_init = ablk_pcbc_init,
  692. .cra_exit = ablk_exit,
  693. .cra_u = {
  694. .ablkcipher = {
  695. .min_keysize = AES_MIN_KEY_SIZE,
  696. .max_keysize = AES_MAX_KEY_SIZE,
  697. .ivsize = AES_BLOCK_SIZE,
  698. .setkey = ablk_set_key,
  699. .encrypt = ablk_encrypt,
  700. .decrypt = ablk_decrypt,
  701. },
  702. },
  703. };
  704. #endif
  705. #ifdef HAS_XTS
  706. static int ablk_xts_init(struct crypto_tfm *tfm)
  707. {
  708. struct cryptd_ablkcipher *cryptd_tfm;
  709. cryptd_tfm = cryptd_alloc_ablkcipher("fpu(xts(__driver-aes-aesni))",
  710. 0, 0);
  711. if (IS_ERR(cryptd_tfm))
  712. return PTR_ERR(cryptd_tfm);
  713. ablk_init_common(tfm, cryptd_tfm);
  714. return 0;
  715. }
  716. static struct crypto_alg ablk_xts_alg = {
  717. .cra_name = "xts(aes)",
  718. .cra_driver_name = "xts-aes-aesni",
  719. .cra_priority = 400,
  720. .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
  721. .cra_blocksize = AES_BLOCK_SIZE,
  722. .cra_ctxsize = sizeof(struct async_aes_ctx),
  723. .cra_alignmask = 0,
  724. .cra_type = &crypto_ablkcipher_type,
  725. .cra_module = THIS_MODULE,
  726. .cra_list = LIST_HEAD_INIT(ablk_xts_alg.cra_list),
  727. .cra_init = ablk_xts_init,
  728. .cra_exit = ablk_exit,
  729. .cra_u = {
  730. .ablkcipher = {
  731. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  732. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  733. .ivsize = AES_BLOCK_SIZE,
  734. .setkey = ablk_set_key,
  735. .encrypt = ablk_encrypt,
  736. .decrypt = ablk_decrypt,
  737. },
  738. },
  739. };
  740. #endif
  741. #ifdef CONFIG_X86_64
  742. static int rfc4106_init(struct crypto_tfm *tfm)
  743. {
  744. struct cryptd_aead *cryptd_tfm;
  745. struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
  746. PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
  747. struct crypto_aead *cryptd_child;
  748. struct aesni_rfc4106_gcm_ctx *child_ctx;
  749. cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
  750. if (IS_ERR(cryptd_tfm))
  751. return PTR_ERR(cryptd_tfm);
  752. cryptd_child = cryptd_aead_child(cryptd_tfm);
  753. child_ctx = aesni_rfc4106_gcm_ctx_get(cryptd_child);
  754. memcpy(child_ctx, ctx, sizeof(*ctx));
  755. ctx->cryptd_tfm = cryptd_tfm;
  756. tfm->crt_aead.reqsize = sizeof(struct aead_request)
  757. + crypto_aead_reqsize(&cryptd_tfm->base);
  758. return 0;
  759. }
  760. static void rfc4106_exit(struct crypto_tfm *tfm)
  761. {
  762. struct aesni_rfc4106_gcm_ctx *ctx =
  763. (struct aesni_rfc4106_gcm_ctx *)
  764. PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
  765. if (!IS_ERR(ctx->cryptd_tfm))
  766. cryptd_free_aead(ctx->cryptd_tfm);
  767. return;
  768. }
  769. static void
  770. rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
  771. {
  772. struct aesni_gcm_set_hash_subkey_result *result = req->data;
  773. if (err == -EINPROGRESS)
  774. return;
  775. result->err = err;
  776. complete(&result->completion);
  777. }
  778. static int
  779. rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
  780. {
  781. struct crypto_ablkcipher *ctr_tfm;
  782. struct ablkcipher_request *req;
  783. int ret = -EINVAL;
  784. struct aesni_hash_subkey_req_data *req_data;
  785. ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
  786. if (IS_ERR(ctr_tfm))
  787. return PTR_ERR(ctr_tfm);
  788. crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
  789. ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
  790. if (ret)
  791. goto out_free_ablkcipher;
  792. ret = -ENOMEM;
  793. req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
  794. if (!req)
  795. goto out_free_ablkcipher;
  796. req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
  797. if (!req_data)
  798. goto out_free_request;
  799. memset(req_data->iv, 0, sizeof(req_data->iv));
  800. /* Clear the data in the hash sub key container to zero.*/
  801. /* We want to cipher all zeros to create the hash sub key. */
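/* With an all-zero IV, ctr(aes) over one zero block produces E_K(0^128),
 * which is exactly the GHASH hash subkey H required by GCM. */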
  802. memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
  803. init_completion(&req_data->result.completion);
  804. sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
  805. ablkcipher_request_set_tfm(req, ctr_tfm);
  806. ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
  807. CRYPTO_TFM_REQ_MAY_BACKLOG,
  808. rfc4106_set_hash_subkey_done,
  809. &req_data->result);
  810. ablkcipher_request_set_crypt(req, &req_data->sg,
  811. &req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
  812. ret = crypto_ablkcipher_encrypt(req);
  813. if (ret == -EINPROGRESS || ret == -EBUSY) {
  814. ret = wait_for_completion_interruptible
  815. (&req_data->result.completion);
  816. if (!ret)
  817. ret = req_data->result.err;
  818. }
  819. kfree(req_data);
  820. out_free_request:
  821. ablkcipher_request_free(req);
  822. out_free_ablkcipher:
  823. crypto_free_ablkcipher(ctr_tfm);
  824. return ret;
  825. }
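/* For rfc4106(gcm(aes)) the key material is the AES key followed by a 4-byte
 * salt (nonce); only AES-128 is supported here, so key_len must be 16 + 4
 * bytes. */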
  826. static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
  827. unsigned int key_len)
  828. {
  829. int ret = 0;
  830. struct crypto_tfm *tfm = crypto_aead_tfm(parent);
  831. struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
  832. struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
  833. struct aesni_rfc4106_gcm_ctx *child_ctx =
  834. aesni_rfc4106_gcm_ctx_get(cryptd_child);
  835. u8 *new_key_mem = NULL;
  836. if (key_len < 4) {
  837. crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  838. return -EINVAL;
  839. }
  840. /*Account for 4 byte nonce at the end.*/
  841. key_len -= 4;
  842. if (key_len != AES_KEYSIZE_128) {
  843. crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
  844. return -EINVAL;
  845. }
  846. memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
  847. /*This must be on a 16 byte boundary!*/
  848. if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
  849. return -EINVAL;
  850. if ((unsigned long)key % AESNI_ALIGN) {
  851. /* key is not aligned: use an auxiliary aligned buffer */
  852. new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
  853. if (!new_key_mem)
  854. return -ENOMEM;
  855. new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
  856. memcpy(new_key_mem, key, key_len);
  857. key = new_key_mem;
  858. }
  859. if (!irq_fpu_usable())
  860. ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
  861. key, key_len);
  862. else {
  863. kernel_fpu_begin();
  864. ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
  865. kernel_fpu_end();
  866. }
  867. /*This must be on a 16 byte boundary!*/
  868. if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
  869. ret = -EINVAL;
  870. goto exit;
  871. }
  872. ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
  873. memcpy(child_ctx, ctx, sizeof(*ctx));
  874. exit:
  875. kfree(new_key_mem);
  876. return ret;
  877. }
  878. /* This is the Integrity Check Value (aka the authentication tag) length; it can
  879. * be 8, 12 or 16 bytes long. */
  880. static int rfc4106_set_authsize(struct crypto_aead *parent,
  881. unsigned int authsize)
  882. {
  883. struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
  884. struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
  885. switch (authsize) {
  886. case 8:
  887. case 12:
  888. case 16:
  889. break;
  890. default:
  891. return -EINVAL;
  892. }
  893. crypto_aead_crt(parent)->authsize = authsize;
  894. crypto_aead_crt(cryptd_child)->authsize = authsize;
  895. return 0;
  896. }
  897. static int rfc4106_encrypt(struct aead_request *req)
  898. {
  899. int ret;
  900. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  901. struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
  902. if (!irq_fpu_usable()) {
  903. struct aead_request *cryptd_req =
  904. (struct aead_request *) aead_request_ctx(req);
  905. memcpy(cryptd_req, req, sizeof(*req));
  906. aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
  907. return crypto_aead_encrypt(cryptd_req);
  908. } else {
  909. struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
  910. kernel_fpu_begin();
  911. ret = cryptd_child->base.crt_aead.encrypt(req);
  912. kernel_fpu_end();
  913. return ret;
  914. }
  915. }
  916. static int rfc4106_decrypt(struct aead_request *req)
  917. {
  918. int ret;
  919. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  920. struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
  921. if (!irq_fpu_usable()) {
  922. struct aead_request *cryptd_req =
  923. (struct aead_request *) aead_request_ctx(req);
  924. memcpy(cryptd_req, req, sizeof(*req));
  925. aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
  926. return crypto_aead_decrypt(cryptd_req);
  927. } else {
  928. struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
  929. kernel_fpu_begin();
  930. ret = cryptd_child->base.crt_aead.decrypt(req);
  931. kernel_fpu_end();
  932. return ret;
  933. }
  934. }
  935. static struct crypto_alg rfc4106_alg = {
  936. .cra_name = "rfc4106(gcm(aes))",
  937. .cra_driver_name = "rfc4106-gcm-aesni",
  938. .cra_priority = 400,
  939. .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
  940. .cra_blocksize = 1,
  941. .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
  942. .cra_alignmask = 0,
  943. .cra_type = &crypto_nivaead_type,
  944. .cra_module = THIS_MODULE,
  945. .cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
  946. .cra_init = rfc4106_init,
  947. .cra_exit = rfc4106_exit,
  948. .cra_u = {
  949. .aead = {
  950. .setkey = rfc4106_set_key,
  951. .setauthsize = rfc4106_set_authsize,
  952. .encrypt = rfc4106_encrypt,
  953. .decrypt = rfc4106_decrypt,
  954. .geniv = "seqiv",
  955. .ivsize = 8,
  956. .maxauthsize = 16,
  957. },
  958. },
  959. };
  960. static int __driver_rfc4106_encrypt(struct aead_request *req)
  961. {
  962. u8 one_entry_in_sg = 0;
  963. u8 *src, *dst, *assoc;
  964. __be32 counter = cpu_to_be32(1);
  965. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  966. struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
  967. void *aes_ctx = &(ctx->aes_key_expanded);
  968. unsigned long auth_tag_len = crypto_aead_authsize(tfm);
  969. u8 iv_tab[16+AESNI_ALIGN];
  970. u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
  971. struct scatter_walk src_sg_walk;
  972. struct scatter_walk assoc_sg_walk;
  973. struct scatter_walk dst_sg_walk;
  974. unsigned int i;
  975. /* Assuming we are supporting rfc4106 64-bit extended */
  976. /* sequence numbers, we need the AAD length to be */
  977. /* equal to 8 or 12 bytes. */
  978. if (unlikely(req->assoclen != 8 && req->assoclen != 12))
  979. return -EINVAL;
  980. /* Build the IV below. */
  981. for (i = 0; i < 4; i++)
  982. *(iv+i) = ctx->nonce[i];
  983. for (i = 0; i < 8; i++)
  984. *(iv+4+i) = req->iv[i];
  985. *((__be32 *)(iv+12)) = counter;
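/* Fast path: when src and assoc each consist of a single scatterlist entry,
 * map them directly; otherwise copy everything into one contiguous buffer. */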
  986. if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
  987. one_entry_in_sg = 1;
  988. scatterwalk_start(&src_sg_walk, req->src);
  989. scatterwalk_start(&assoc_sg_walk, req->assoc);
  990. src = scatterwalk_map(&src_sg_walk);
  991. assoc = scatterwalk_map(&assoc_sg_walk);
  992. dst = src;
  993. if (unlikely(req->src != req->dst)) {
  994. scatterwalk_start(&dst_sg_walk, req->dst);
  995. dst = scatterwalk_map(&dst_sg_walk);
  996. }
  997. } else {
  998. /* Allocate memory for src, dst, assoc */
  999. src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
  1000. GFP_ATOMIC);
  1001. if (unlikely(!src))
  1002. return -ENOMEM;
  1003. assoc = (src + req->cryptlen + auth_tag_len);
  1004. scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
  1005. scatterwalk_map_and_copy(assoc, req->assoc, 0,
  1006. req->assoclen, 0);
  1007. dst = src;
  1008. }
  1009. aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
  1010. ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
  1011. + ((unsigned long)req->cryptlen), auth_tag_len);
  1012. /* The authTag (aka the Integrity Check Value) needs to be written
  1013. * back to the packet. */
  1014. if (one_entry_in_sg) {
  1015. if (unlikely(req->src != req->dst)) {
  1016. scatterwalk_unmap(dst);
  1017. scatterwalk_done(&dst_sg_walk, 0, 0);
  1018. }
  1019. scatterwalk_unmap(src);
  1020. scatterwalk_unmap(assoc);
  1021. scatterwalk_done(&src_sg_walk, 0, 0);
  1022. scatterwalk_done(&assoc_sg_walk, 0, 0);
  1023. } else {
  1024. scatterwalk_map_and_copy(dst, req->dst, 0,
  1025. req->cryptlen + auth_tag_len, 1);
  1026. kfree(src);
  1027. }
  1028. return 0;
  1029. }
  1030. static int __driver_rfc4106_decrypt(struct aead_request *req)
  1031. {
  1032. u8 one_entry_in_sg = 0;
  1033. u8 *src, *dst, *assoc;
  1034. unsigned long tempCipherLen = 0;
  1035. __be32 counter = cpu_to_be32(1);
  1036. int retval = 0;
  1037. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1038. struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
  1039. void *aes_ctx = &(ctx->aes_key_expanded);
  1040. unsigned long auth_tag_len = crypto_aead_authsize(tfm);
  1041. u8 iv_and_authTag[32+AESNI_ALIGN];
  1042. u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
  1043. u8 *authTag = iv + 16;
  1044. struct scatter_walk src_sg_walk;
  1045. struct scatter_walk assoc_sg_walk;
  1046. struct scatter_walk dst_sg_walk;
  1047. unsigned int i;
  1048. if (unlikely((req->cryptlen < auth_tag_len) ||
  1049. (req->assoclen != 8 && req->assoclen != 12)))
  1050. return -EINVAL;
  1051. /* Assuming we are supporting rfc4106 64-bit extended */
  1052. /* sequence numbers, we need the AAD length to be */
  1053. /* equal to 8 or 12 bytes. */
  1054. tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
  1055. /* Build the IV below. */
  1056. for (i = 0; i < 4; i++)
  1057. *(iv+i) = ctx->nonce[i];
  1058. for (i = 0; i < 8; i++)
  1059. *(iv+4+i) = req->iv[i];
  1060. *((__be32 *)(iv+12)) = counter;
  1061. if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
  1062. one_entry_in_sg = 1;
  1063. scatterwalk_start(&src_sg_walk, req->src);
  1064. scatterwalk_start(&assoc_sg_walk, req->assoc);
  1065. src = scatterwalk_map(&src_sg_walk);
  1066. assoc = scatterwalk_map(&assoc_sg_walk);
  1067. dst = src;
  1068. if (unlikely(req->src != req->dst)) {
  1069. scatterwalk_start(&dst_sg_walk, req->dst);
  1070. dst = scatterwalk_map(&dst_sg_walk);
  1071. }
  1072. } else {
  1073. /* Allocate memory for src, dst, assoc */
  1074. src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
  1075. if (!src)
  1076. return -ENOMEM;
  1077. assoc = (src + req->cryptlen);
  1078. scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
  1079. scatterwalk_map_and_copy(assoc, req->assoc, 0,
  1080. req->assoclen, 0);
  1081. dst = src;
  1082. }
  1083. aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
  1084. ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
  1085. authTag, auth_tag_len);
  1086. /* Compare generated tag with passed in tag. */
  1087. retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
  1088. -EBADMSG : 0;
  1089. if (one_entry_in_sg) {
  1090. if (unlikely(req->src != req->dst)) {
  1091. scatterwalk_unmap(dst);
  1092. scatterwalk_done(&dst_sg_walk, 0, 0);
  1093. }
  1094. scatterwalk_unmap(src);
  1095. scatterwalk_unmap(assoc);
  1096. scatterwalk_done(&src_sg_walk, 0, 0);
  1097. scatterwalk_done(&assoc_sg_walk, 0, 0);
  1098. } else {
  1099. scatterwalk_map_and_copy(dst, req->dst, 0, tempCipherLen, 1);
  1100. kfree(src);
  1101. }
  1102. return retval;
  1103. }
  1104. static struct crypto_alg __rfc4106_alg = {
  1105. .cra_name = "__gcm-aes-aesni",
  1106. .cra_driver_name = "__driver-gcm-aes-aesni",
  1107. .cra_priority = 0,
  1108. .cra_flags = CRYPTO_ALG_TYPE_AEAD,
  1109. .cra_blocksize = 1,
  1110. .cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
  1111. .cra_alignmask = 0,
  1112. .cra_type = &crypto_aead_type,
  1113. .cra_module = THIS_MODULE,
  1114. .cra_list = LIST_HEAD_INIT(__rfc4106_alg.cra_list),
  1115. .cra_u = {
  1116. .aead = {
  1117. .encrypt = __driver_rfc4106_encrypt,
  1118. .decrypt = __driver_rfc4106_decrypt,
  1119. },
  1120. },
  1121. };
  1122. #endif
  1123. static const struct x86_cpu_id aesni_cpu_id[] = {
  1124. X86_FEATURE_MATCH(X86_FEATURE_AES),
  1125. {}
  1126. };
  1127. MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
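/* Register the plain cipher and blkcipher algorithms first, then the async
 * (cryptd-backed) wrappers; any failure unwinds the earlier registrations in
 * reverse order through the goto ladder below. */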
  1128. static int __init aesni_init(void)
  1129. {
  1130. int err;
  1131. if (!x86_match_cpu(aesni_cpu_id))
  1132. return -ENODEV;
  1133. if ((err = crypto_fpu_init()))
  1134. goto fpu_err;
  1135. if ((err = crypto_register_alg(&aesni_alg)))
  1136. goto aes_err;
  1137. if ((err = crypto_register_alg(&__aesni_alg)))
  1138. goto __aes_err;
  1139. if ((err = crypto_register_alg(&blk_ecb_alg)))
  1140. goto blk_ecb_err;
  1141. if ((err = crypto_register_alg(&blk_cbc_alg)))
  1142. goto blk_cbc_err;
  1143. if ((err = crypto_register_alg(&ablk_ecb_alg)))
  1144. goto ablk_ecb_err;
  1145. if ((err = crypto_register_alg(&ablk_cbc_alg)))
  1146. goto ablk_cbc_err;
  1147. #ifdef CONFIG_X86_64
  1148. if ((err = crypto_register_alg(&blk_ctr_alg)))
  1149. goto blk_ctr_err;
  1150. if ((err = crypto_register_alg(&ablk_ctr_alg)))
  1151. goto ablk_ctr_err;
  1152. if ((err = crypto_register_alg(&__rfc4106_alg)))
  1153. goto __aead_gcm_err;
  1154. if ((err = crypto_register_alg(&rfc4106_alg)))
  1155. goto aead_gcm_err;
  1156. #ifdef HAS_CTR
  1157. if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
  1158. goto ablk_rfc3686_ctr_err;
  1159. #endif
  1160. #endif
  1161. #ifdef HAS_LRW
  1162. if ((err = crypto_register_alg(&ablk_lrw_alg)))
  1163. goto ablk_lrw_err;
  1164. #endif
  1165. #ifdef HAS_PCBC
  1166. if ((err = crypto_register_alg(&ablk_pcbc_alg)))
  1167. goto ablk_pcbc_err;
  1168. #endif
  1169. #ifdef HAS_XTS
  1170. if ((err = crypto_register_alg(&ablk_xts_alg)))
  1171. goto ablk_xts_err;
  1172. #endif
  1173. return err;
  1174. #ifdef HAS_XTS
  1175. ablk_xts_err:
  1176. #endif
  1177. #ifdef HAS_PCBC
  1178. crypto_unregister_alg(&ablk_pcbc_alg);
  1179. ablk_pcbc_err:
  1180. #endif
  1181. #ifdef HAS_LRW
  1182. crypto_unregister_alg(&ablk_lrw_alg);
  1183. ablk_lrw_err:
  1184. #endif
  1185. #ifdef CONFIG_X86_64
  1186. #ifdef HAS_CTR
  1187. crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
  1188. ablk_rfc3686_ctr_err:
  1189. #endif
  1190. crypto_unregister_alg(&rfc4106_alg);
  1191. aead_gcm_err:
  1192. crypto_unregister_alg(&__rfc4106_alg);
  1193. __aead_gcm_err:
  1194. crypto_unregister_alg(&ablk_ctr_alg);
  1195. ablk_ctr_err:
  1196. crypto_unregister_alg(&blk_ctr_alg);
  1197. blk_ctr_err:
  1198. #endif
  1199. crypto_unregister_alg(&ablk_cbc_alg);
  1200. ablk_cbc_err:
  1201. crypto_unregister_alg(&ablk_ecb_alg);
  1202. ablk_ecb_err:
  1203. crypto_unregister_alg(&blk_cbc_alg);
  1204. blk_cbc_err:
  1205. crypto_unregister_alg(&blk_ecb_alg);
  1206. blk_ecb_err:
  1207. crypto_unregister_alg(&__aesni_alg);
  1208. __aes_err:
  1209. crypto_unregister_alg(&aesni_alg);
  1210. aes_err:
  1211. fpu_err:
  1212. return err;
  1213. }
  1214. static void __exit aesni_exit(void)
  1215. {
  1216. #ifdef HAS_XTS
  1217. crypto_unregister_alg(&ablk_xts_alg);
  1218. #endif
  1219. #ifdef HAS_PCBC
  1220. crypto_unregister_alg(&ablk_pcbc_alg);
  1221. #endif
  1222. #ifdef HAS_LRW
  1223. crypto_unregister_alg(&ablk_lrw_alg);
  1224. #endif
  1225. #ifdef CONFIG_X86_64
  1226. #ifdef HAS_CTR
  1227. crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
  1228. #endif
  1229. crypto_unregister_alg(&rfc4106_alg);
  1230. crypto_unregister_alg(&__rfc4106_alg);
  1231. crypto_unregister_alg(&ablk_ctr_alg);
  1232. crypto_unregister_alg(&blk_ctr_alg);
  1233. #endif
  1234. crypto_unregister_alg(&ablk_cbc_alg);
  1235. crypto_unregister_alg(&ablk_ecb_alg);
  1236. crypto_unregister_alg(&blk_cbc_alg);
  1237. crypto_unregister_alg(&blk_ecb_alg);
  1238. crypto_unregister_alg(&__aesni_alg);
  1239. crypto_unregister_alg(&aesni_alg);
  1240. crypto_fpu_exit();
  1241. }
  1242. module_init(aesni_init);
  1243. module_exit(aesni_exit);
  1244. MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
  1245. MODULE_LICENSE("GPL");
  1246. MODULE_ALIAS("aes");