aes_s390.c

/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

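/*
 * Per-transform contexts: s390_aes_ctx is shared by the plain cipher and
 * the ECB/CBC/CTR modes (raw key, key length, CPACF function code and a
 * software fallback tfm); s390_xts_ctx keeps the two XTS subkeys separately.
 */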
struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

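/*
 * Key setting for the software fallback cipher: forward the request flags
 * from the wrapping tfm and mirror any result flags back on failure.
 */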
static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				   CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

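/*
 * Route block-mode requests through the allocated software skcipher when
 * no CPACF function code is available for the selected key length.
 */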
static int fallback_blk_dec(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

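/*
 * ECB walk: hand only complete AES blocks to the KM instruction and give
 * any remainder back to the walk.
 */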
static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
						   CRYPTO_ALG_ASYNC |
						   CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}

	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-s390",
	.cra_priority		= 400,	/* combo: aes + ecb */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ecb_aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

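/*
 * KMC expects a parameter block of chaining value (IV) followed by the key;
 * the updated chaining value is copied back to the walk IV afterwards.
 */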
static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-s390",
	.cra_priority		= 400,	/* combo: aes + cbc */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= cbc_aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
		struct scatterlist *dst, struct scatterlist *src,
		unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return xts_fallback_setkey(tfm, in_key, key_len);

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

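/*
 * XTS: the PCC instruction derives the initial tweak from the second
 * subkey and the IV; its xts output seeds the KM-XTS parameter block.
 * The key offset selects the 128-bit vs. 256-bit parameter block layout.
 */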
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
						  CRYPTO_ALG_ASYNC |
						  CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-s390",
	.cra_priority		= 400,	/* combo: aes + xts */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_xts_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= xts_fallback_init,
	.cra_exit		= xts_fallback_exit,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aes_set_key,
			.encrypt	= xts_aes_encrypt,
			.decrypt	= xts_aes_decrypt,
		}
	}
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

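/*
 * Fill the shared counter page with successive counter values derived from
 * the current IV so that KMCTR can process up to PAGE_SIZE bytes per call.
 */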
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

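/*
 * CTR mode: try to grab the shared counter page; if another tfm holds the
 * lock, fall back to processing a single block at a time with the walk IV
 * as the counter. A final partial block is handled via a stack buffer.
 */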
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-s390",
	.cra_priority		= 400,	/* combo: aes + ctr */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ctr_aes_set_key,
			.encrypt	= ctr_aes_encrypt,
			.decrypt	= ctr_aes_decrypt,
		}
	}
};

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);
}

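/*
 * Query the CPACF facilities once at module load and register only those
 * algorithms for which at least one AES function code is available; key
 * lengths without hardware support are handled by the fallback at setkey.
 */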
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC and KMCTR */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");