/*
 * CCM: Counter with CBC-MAC
 *
 * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "internal.h"

struct ccm_instance_ctx {
        struct crypto_skcipher_spawn ctr;
        struct crypto_spawn cipher;
};

struct crypto_ccm_ctx {
        struct crypto_cipher *cipher;
        struct crypto_ablkcipher *ctr;
};

struct crypto_rfc4309_ctx {
        struct crypto_aead *child;
        u8 nonce[3];
};

struct crypto_ccm_req_priv_ctx {
        u8 odata[16];
        u8 idata[16];
        u8 auth_tag[16];
        u32 ilen;
        u32 flags;
        struct scatterlist src[2];
        struct scatterlist dst[2];
        struct ablkcipher_request abreq;
};

static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
        struct aead_request *req)
{
        unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));

        return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}

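/*
 * Write the message length into a csize-octet field, most significant
 * octet first, as the l(m) field of the B_0 block requires (RFC 3610
 * section 2.2).  Returns -EOVERFLOW when the length does not fit.
 */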
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
        __be32 data;

        memset(block, 0, csize);
        block += csize;

        if (csize >= 4)
                csize = 4;
        else if (msglen > (1 << (8 * csize)))
                return -EOVERFLOW;

        data = cpu_to_be32(msglen);
        memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

        return 0;
}

static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
                             unsigned int keylen)
{
        struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_ablkcipher *ctr = ctx->ctr;
        struct crypto_cipher *tfm = ctx->cipher;
        int err = 0;

        crypto_ablkcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
        crypto_ablkcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
                                    CRYPTO_TFM_REQ_MASK);
        err = crypto_ablkcipher_setkey(ctr, key, keylen);
        crypto_aead_set_flags(aead, crypto_ablkcipher_get_flags(ctr) &
                              CRYPTO_TFM_RES_MASK);
        if (err)
                goto out;

        crypto_cipher_clear_flags(tfm, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(tfm, crypto_aead_get_flags(aead) &
                                CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(tfm, key, keylen);
        crypto_aead_set_flags(aead, crypto_cipher_get_flags(tfm) &
                              CRYPTO_TFM_RES_MASK);

out:
        return err;
}

static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        switch (authsize) {
        case 4:
        case 6:
        case 8:
        case 10:
        case 12:
        case 14:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

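/*
 * Build the B_0 block from req->iv, whose first octet must already
 * carry L' = L - 1 (the size of the length field minus one):
 *
 *      octet 0:           flags = 64*Adata | 8*((M-2)/2) | (L-1)
 *      octets 1..15-L:    the nonce N
 *      octets 16-L..15:   l(m), the message length, big-endian
 *
 * per RFC 3610 section 2.2 and NIST SP 800-38C appendix A.
 */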
static int format_input(u8 *info, struct aead_request *req,
                        unsigned int cryptlen)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        unsigned int lp = req->iv[0];
        unsigned int l = lp + 1;
        unsigned int m;

        m = crypto_aead_authsize(aead);

        memcpy(info, req->iv, 16);

        /* format control info per RFC 3610 and
         * NIST Special Publication 800-38C
         */
        *info |= (8 * ((m - 2) / 2));
        if (req->assoclen)
                *info |= 64;

        return set_msg_len(info + 16 - l, cryptlen, l);
}

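/*
 * Encode the associated data length as RFC 3610 section 2.2 prescribes:
 * lengths below 2^16 - 2^8 get a plain 2-octet field, anything larger
 * that fits in 32 bits gets the 0xfffe marker and a 4-octet field.
 */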
static int format_adata(u8 *adata, unsigned int a)
{
        int len = 0;

        /* add control info for associated data
         * RFC 3610 and NIST Special Publication 800-38C
         */
        if (a < 65280) {
                *(__be16 *)adata = cpu_to_be16(a);
                len = 2;
        } else {
                *(__be16 *)adata = cpu_to_be16(0xfffe);
                *(__be32 *)&adata[2] = cpu_to_be32(a);
                len = 6;
        }

        return len;
}

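/*
 * Fold n bytes into the running CBC-MAC.  pctx->odata holds the MAC
 * state; pctx->idata buffers a partial block between calls so the data
 * may arrive in arbitrary-sized pieces.
 */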
static void compute_mac(struct crypto_cipher *tfm, u8 *data, int n,
                        struct crypto_ccm_req_priv_ctx *pctx)
{
        unsigned int bs = 16;
        u8 *odata = pctx->odata;
        u8 *idata = pctx->idata;
        int datalen, getlen;

        datalen = n;

        /* first time in here, block may be partially filled. */
        getlen = bs - pctx->ilen;
        if (datalen >= getlen) {
                memcpy(idata + pctx->ilen, data, getlen);
                crypto_xor(odata, idata, bs);
                crypto_cipher_encrypt_one(tfm, odata, odata);
                datalen -= getlen;
                data += getlen;
                pctx->ilen = 0;
        }

        /* now encrypt rest of data */
        while (datalen >= bs) {
                crypto_xor(odata, data, bs);
                crypto_cipher_encrypt_one(tfm, odata, odata);
                datalen -= bs;
                data += bs;
        }

        /* check and see if there's leftover data that wasn't
         * enough to fill a block.
         */
        if (datalen) {
                memcpy(idata + pctx->ilen, data, datalen);
                pctx->ilen += datalen;
        }
}

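/*
 * Feed every segment of a scatterlist through compute_mac(), then pad
 * any buffered leftover with zeroes to a full block and fold it in,
 * as CCM requires at the end of each formatted field.
 */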
static void get_data_to_compute(struct crypto_cipher *tfm,
                                struct crypto_ccm_req_priv_ctx *pctx,
                                struct scatterlist *sg, unsigned int len)
{
        struct scatter_walk walk;
        u8 *data_src;
        int n;

        scatterwalk_start(&walk, sg);

        while (len) {
                n = scatterwalk_clamp(&walk, len);
                if (!n) {
                        scatterwalk_start(&walk, sg_next(walk.sg));
                        n = scatterwalk_clamp(&walk, len);
                }
                data_src = scatterwalk_map(&walk, 0);

                compute_mac(tfm, data_src, n, pctx);
                len -= n;

                scatterwalk_unmap(data_src, 0);
                scatterwalk_advance(&walk, n);
                scatterwalk_done(&walk, 0, len);
                if (len)
                        crypto_yield(pctx->flags);
        }

        /* any leftover needs padding and then encryption */
        if (pctx->ilen) {
                int padlen;
                u8 *odata = pctx->odata;
                u8 *idata = pctx->idata;

                padlen = 16 - pctx->ilen;
                memset(idata + pctx->ilen, 0, padlen);
                crypto_xor(odata, idata, 16);
                crypto_cipher_encrypt_one(tfm, odata, odata);
                pctx->ilen = 0;
        }
}

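/*
 * Run the CBC-MAC over B_0, the length-prefixed associated data and
 * the plaintext (each zero-padded to the block size), leaving the raw
 * tag T in pctx->odata.
 */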
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
                           unsigned int cryptlen)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
        struct crypto_cipher *cipher = ctx->cipher;
        unsigned int assoclen = req->assoclen;
        u8 *odata = pctx->odata;
        u8 *idata = pctx->idata;
        int err;

        /* format control data for input */
        err = format_input(odata, req, cryptlen);
        if (err)
                goto out;

        /* encrypt first block to use as start in computing mac */
        crypto_cipher_encrypt_one(cipher, odata, odata);

        /* format associated data and compute into mac */
        if (assoclen) {
                pctx->ilen = format_adata(idata, assoclen);
                get_data_to_compute(cipher, pctx, req->assoc, req->assoclen);
        } else {
                pctx->ilen = 0;
        }

        /* compute plaintext into mac */
        get_data_to_compute(cipher, pctx, plain, cryptlen);

out:
        return err;
}

static void crypto_ccm_encrypt_done(struct crypto_async_request *areq, int err)
{
        struct aead_request *req = areq->data;
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
        u8 *odata = pctx->odata;

        if (!err)
                scatterwalk_map_and_copy(odata, req->dst, req->cryptlen,
                                         crypto_aead_authsize(aead), 1);
        aead_request_complete(req, err);
}

static inline int crypto_ccm_check_iv(const u8 *iv)
{
        /* 2 <= L <= 8, so 1 <= L' <= 7. */
        if (1 > iv[0] || iv[0] > 7)
                return -EINVAL;

        return 0;
}

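/*
 * CTR pass for encryption.  Chaining odata in front of the payload
 * makes the keystream block S_0 = E(K, A_0) encrypt the CBC-MAC tag
 * while S_1, S_2, ... encrypt the message, so one call processes
 * cryptlen + 16 bytes; the encrypted tag is then appended to dst.
 */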
static int crypto_ccm_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
        struct ablkcipher_request *abreq = &pctx->abreq;
        struct scatterlist *dst;
        unsigned int cryptlen = req->cryptlen;
        u8 *odata = pctx->odata;
        u8 *iv = req->iv;
        int err;

        err = crypto_ccm_check_iv(iv);
        if (err)
                return err;

        pctx->flags = aead_request_flags(req);

        err = crypto_ccm_auth(req, req->src, cryptlen);
        if (err)
                return err;

        /* Note: rfc 3610 and NIST 800-38C require counter of
         * zero to encrypt auth tag.
         */
        memset(iv + 15 - iv[0], 0, iv[0] + 1);

        sg_init_table(pctx->src, 2);
        sg_set_buf(pctx->src, odata, 16);
        scatterwalk_sg_chain(pctx->src, 2, req->src);

        dst = pctx->src;
        if (req->src != req->dst) {
                sg_init_table(pctx->dst, 2);
                sg_set_buf(pctx->dst, odata, 16);
                scatterwalk_sg_chain(pctx->dst, 2, req->dst);
                dst = pctx->dst;
        }

        ablkcipher_request_set_tfm(abreq, ctx->ctr);
        ablkcipher_request_set_callback(abreq, pctx->flags,
                                        crypto_ccm_encrypt_done, req);
        ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
        err = crypto_ablkcipher_encrypt(abreq);
        if (err)
                return err;

        /* copy authtag to end of dst */
        scatterwalk_map_and_copy(odata, req->dst, cryptlen,
                                 crypto_aead_authsize(aead), 1);
        return err;
}

static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
                                    int err)
{
        struct aead_request *req = areq->data;
        struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(aead);
        unsigned int cryptlen = req->cryptlen - authsize;

        if (!err) {
                err = crypto_ccm_auth(req, req->dst, cryptlen);
                if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
                        err = -EBADMSG;
        }
        aead_request_complete(req, err);
}

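/*
 * Decryption mirrors the CTR pass: the tag lifted from the end of src
 * is decrypted together with the payload, then the CBC-MAC is
 * recomputed over the recovered plaintext and compared with it.
 */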
static int crypto_ccm_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
        struct ablkcipher_request *abreq = &pctx->abreq;
        struct scatterlist *dst;
        unsigned int authsize = crypto_aead_authsize(aead);
        unsigned int cryptlen = req->cryptlen;
        u8 *authtag = pctx->auth_tag;
        u8 *odata = pctx->odata;
        u8 *iv = req->iv;
        int err;

        if (cryptlen < authsize)
                return -EINVAL;
        cryptlen -= authsize;

        err = crypto_ccm_check_iv(iv);
        if (err)
                return err;

        pctx->flags = aead_request_flags(req);

        scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0);

        memset(iv + 15 - iv[0], 0, iv[0] + 1);

        sg_init_table(pctx->src, 2);
        sg_set_buf(pctx->src, authtag, 16);
        scatterwalk_sg_chain(pctx->src, 2, req->src);

        dst = pctx->src;
        if (req->src != req->dst) {
                sg_init_table(pctx->dst, 2);
                sg_set_buf(pctx->dst, authtag, 16);
                scatterwalk_sg_chain(pctx->dst, 2, req->dst);
                dst = pctx->dst;
        }

        ablkcipher_request_set_tfm(abreq, ctx->ctr);
        ablkcipher_request_set_callback(abreq, pctx->flags,
                                        crypto_ccm_decrypt_done, req);
        ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
        err = crypto_ablkcipher_decrypt(abreq);
        if (err)
                return err;

        err = crypto_ccm_auth(req, req->dst, cryptlen);
        if (err)
                return err;

        /* verify */
        if (memcmp(authtag, odata, authsize))
                return -EBADMSG;

        return err;
}

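/*
 * The AEAD request context must fit the aligned private context plus
 * the request context of the underlying CTR transform, since abreq is
 * the last member of struct crypto_ccm_req_priv_ctx.
 */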
static int crypto_ccm_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct ccm_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_cipher *cipher;
        struct crypto_ablkcipher *ctr;
        unsigned long align;
        int err;

        cipher = crypto_spawn_cipher(&ictx->cipher);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctr = crypto_spawn_skcipher(&ictx->ctr);
        err = PTR_ERR(ctr);
        if (IS_ERR(ctr))
                goto err_free_cipher;

        ctx->cipher = cipher;
        ctx->ctr = ctr;

        align = crypto_tfm_alg_alignmask(tfm);
        align &= ~(crypto_tfm_ctx_alignment() - 1);
        tfm->crt_aead.reqsize = align +
                                sizeof(struct crypto_ccm_req_priv_ctx) +
                                crypto_ablkcipher_reqsize(ctr);

        return 0;

err_free_cipher:
        crypto_free_cipher(cipher);
        return err;
}

static void crypto_ccm_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ccm_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(ctx->cipher);
        crypto_free_ablkcipher(ctx->ctr);
}

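/*
 * Common instantiation for "ccm" and "ccm_base": grab a CTR-mode
 * skcipher for the payload pass and the raw block cipher for the
 * CBC-MAC.  "ccm(aes)" derives both as "ctr(aes)" and "aes", while
 * "ccm_base" takes the two driver names explicitly.
 */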
static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb,
                                                       const char *full_name,
                                                       const char *ctr_name,
                                                       const char *cipher_name)
{
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        struct crypto_alg *ctr;
        struct crypto_alg *cipher;
        struct ccm_instance_ctx *ictx;
        int err;

        algt = crypto_get_attr_type(tb);
        err = PTR_ERR(algt);
        if (IS_ERR(algt))
                return ERR_PTR(err);

        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
                return ERR_PTR(-EINVAL);

        cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER,
                                       CRYPTO_ALG_TYPE_MASK);
        err = PTR_ERR(cipher);
        if (IS_ERR(cipher))
                return ERR_PTR(err);

        err = -EINVAL;
        if (cipher->cra_blocksize != 16)
                goto out_put_cipher;

        inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
        err = -ENOMEM;
        if (!inst)
                goto out_put_cipher;

        ictx = crypto_instance_ctx(inst);

        err = crypto_init_spawn(&ictx->cipher, cipher, inst,
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto err_free_inst;

        crypto_set_skcipher_spawn(&ictx->ctr, inst);
        err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
        if (err)
                goto err_drop_cipher;

        ctr = crypto_skcipher_spawn_alg(&ictx->ctr);

        /* Not a stream cipher? */
        err = -EINVAL;
        if (ctr->cra_blocksize != 1)
                goto err_drop_ctr;

        /* We want the real thing! */
        if (ctr->cra_ablkcipher.ivsize != 16)
                goto err_drop_ctr;

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "ccm_base(%s,%s)", ctr->cra_driver_name,
                     cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto err_drop_ctr;

        memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
        inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority;
        inst->alg.cra_blocksize = 1;
        inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask |
                                  (__alignof__(u32) - 1);
        inst->alg.cra_type = &crypto_aead_type;
        inst->alg.cra_aead.ivsize = 16;
        inst->alg.cra_aead.maxauthsize = 16;
        inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
        inst->alg.cra_init = crypto_ccm_init_tfm;
        inst->alg.cra_exit = crypto_ccm_exit_tfm;
        inst->alg.cra_aead.setkey = crypto_ccm_setkey;
        inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize;
        inst->alg.cra_aead.encrypt = crypto_ccm_encrypt;
        inst->alg.cra_aead.decrypt = crypto_ccm_decrypt;

out:
        crypto_mod_put(cipher);
        return inst;

err_drop_ctr:
        crypto_drop_skcipher(&ictx->ctr);
err_drop_cipher:
        crypto_drop_spawn(&ictx->cipher);
err_free_inst:
        kfree(inst);
out_put_cipher:
        inst = ERR_PTR(err);
        goto out;
}

static struct crypto_instance *crypto_ccm_alloc(struct rtattr **tb)
{
        int err;
        const char *cipher_name;
        char ctr_name[CRYPTO_MAX_ALG_NAME];
        char full_name[CRYPTO_MAX_ALG_NAME];

        cipher_name = crypto_attr_alg_name(tb[1]);
        err = PTR_ERR(cipher_name);
        if (IS_ERR(cipher_name))
                return ERR_PTR(err);

        if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
                     cipher_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-ENAMETOOLONG);

        if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
            CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-ENAMETOOLONG);

        return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
}

static void crypto_ccm_free(struct crypto_instance *inst)
{
        struct ccm_instance_ctx *ctx = crypto_instance_ctx(inst);

        crypto_drop_spawn(&ctx->cipher);
        crypto_drop_skcipher(&ctx->ctr);
        kfree(inst);
}

static struct crypto_template crypto_ccm_tmpl = {
        .name = "ccm",
        .alloc = crypto_ccm_alloc,
        .free = crypto_ccm_free,
        .module = THIS_MODULE,
};

static struct crypto_instance *crypto_ccm_base_alloc(struct rtattr **tb)
{
        int err;
        const char *ctr_name;
        const char *cipher_name;
        char full_name[CRYPTO_MAX_ALG_NAME];

        ctr_name = crypto_attr_alg_name(tb[1]);
        err = PTR_ERR(ctr_name);
        if (IS_ERR(ctr_name))
                return ERR_PTR(err);

        cipher_name = crypto_attr_alg_name(tb[2]);
        err = PTR_ERR(cipher_name);
        if (IS_ERR(cipher_name))
                return ERR_PTR(err);

        if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
                     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-ENAMETOOLONG);

        return crypto_ccm_alloc_common(tb, full_name, ctr_name, cipher_name);
}

static struct crypto_template crypto_ccm_base_tmpl = {
        .name = "ccm_base",
        .alloc = crypto_ccm_base_alloc,
        .free = crypto_ccm_free,
        .module = THIS_MODULE,
};

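/*
 * RFC 4309 (CCM for IPsec ESP): the last three octets of the key are
 * a nonce salt, each packet carries an 8-octet explicit IV, and the
 * resulting 11-octet nonce leaves L = 4 octets for the length field.
 */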
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
                                 unsigned int keylen)
{
        struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;
        int err;

        if (keylen < 3)
                return -EINVAL;

        keylen -= 3;
        memcpy(ctx->nonce, key + keylen, 3);

        crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
                              CRYPTO_TFM_REQ_MASK);
        err = crypto_aead_setkey(child, key, keylen);
        crypto_aead_set_flags(parent, crypto_aead_get_flags(child) &
                              CRYPTO_TFM_RES_MASK);

        return err;
}

static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
                                      unsigned int authsize)
{
        struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return crypto_aead_setauthsize(ctx->child, authsize);
}

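/*
 * Assemble the 16-octet CCM IV for the inner request: L' = 3, the
 * 3-octet salt from setkey, then the caller's 8-octet IV; the trailing
 * four octets form the counter field that the CCM code zeroes itself.
 */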
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
        struct aead_request *subreq = aead_request_ctx(req);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
        struct crypto_aead *child = ctx->child;
        u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
                           crypto_aead_alignmask(child) + 1);

        /* L' */
        iv[0] = 3;

        memcpy(iv + 1, ctx->nonce, 3);
        memcpy(iv + 4, req->iv, 8);

        aead_request_set_tfm(subreq, child);
        aead_request_set_callback(subreq, req->base.flags, req->base.complete,
                                  req->base.data);
        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv);
        aead_request_set_assoc(subreq, req->assoc, req->assoclen);

        return subreq;
}

static int crypto_rfc4309_encrypt(struct aead_request *req)
{
        req = crypto_rfc4309_crypt(req);

        return crypto_aead_encrypt(req);
}

static int crypto_rfc4309_decrypt(struct aead_request *req)
{
        req = crypto_rfc4309_crypt(req);

        return crypto_aead_decrypt(req);
}

static int crypto_rfc4309_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_aead_spawn *spawn = crypto_instance_ctx(inst);
        struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aead *aead;
        unsigned long align;

        aead = crypto_spawn_aead(spawn);
        if (IS_ERR(aead))
                return PTR_ERR(aead);

        ctx->child = aead;

        align = crypto_aead_alignmask(aead);
        align &= ~(crypto_tfm_ctx_alignment() - 1);
        tfm->crt_aead.reqsize = sizeof(struct aead_request) +
                                ALIGN(crypto_aead_reqsize(aead),
                                      crypto_tfm_ctx_alignment()) +
                                align + 16;

        return 0;
}

static void crypto_rfc4309_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_rfc4309_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

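/*
 * rfc4309 wraps an existing CCM instance as a nivaead with "seqiv" as
 * its IV generator: the wrapped algorithm must take a 16-octet IV
 * while the rfc4309 instance itself exposes an 8-octet one.
 */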
static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        struct crypto_aead_spawn *spawn;
        struct crypto_alg *alg;
        const char *ccm_name;
        int err;

        algt = crypto_get_attr_type(tb);
        err = PTR_ERR(algt);
        if (IS_ERR(algt))
                return ERR_PTR(err);

        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
                return ERR_PTR(-EINVAL);

        ccm_name = crypto_attr_alg_name(tb[1]);
        err = PTR_ERR(ccm_name);
        if (IS_ERR(ccm_name))
                return ERR_PTR(err);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);

        spawn = crypto_instance_ctx(inst);
        crypto_set_aead_spawn(spawn, inst);
        err = crypto_grab_aead(spawn, ccm_name, 0,
                               crypto_requires_sync(algt->type, algt->mask));
        if (err)
                goto out_free_inst;

        alg = crypto_aead_spawn_alg(spawn);

        err = -EINVAL;

        /* We only support 16-byte blocks. */
        if (alg->cra_aead.ivsize != 16)
                goto out_drop_alg;

        /* Not a stream cipher? */
        if (alg->cra_blocksize != 1)
                goto out_drop_alg;

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
                     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
            snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "rfc4309(%s)", alg->cra_driver_name) >=
            CRYPTO_MAX_ALG_NAME)
                goto out_drop_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
        inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = 1;
        inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_nivaead_type;

        inst->alg.cra_aead.ivsize = 8;
        inst->alg.cra_aead.maxauthsize = 16;

        inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);

        inst->alg.cra_init = crypto_rfc4309_init_tfm;
        inst->alg.cra_exit = crypto_rfc4309_exit_tfm;

        inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
        inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
        inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
        inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;

        inst->alg.cra_aead.geniv = "seqiv";

out:
        return inst;

out_drop_alg:
        crypto_drop_aead(spawn);
out_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}

static void crypto_rfc4309_free(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(inst);
}

static struct crypto_template crypto_rfc4309_tmpl = {
        .name = "rfc4309",
        .alloc = crypto_rfc4309_alloc,
        .free = crypto_rfc4309_free,
        .module = THIS_MODULE,
};

static int __init crypto_ccm_module_init(void)
{
        int err;

        err = crypto_register_template(&crypto_ccm_base_tmpl);
        if (err)
                goto out;

        err = crypto_register_template(&crypto_ccm_tmpl);
        if (err)
                goto out_undo_base;

        err = crypto_register_template(&crypto_rfc4309_tmpl);
        if (err)
                goto out_undo_ccm;

out:
        return err;

out_undo_ccm:
        crypto_unregister_template(&crypto_ccm_tmpl);
out_undo_base:
        crypto_unregister_template(&crypto_ccm_base_tmpl);
        goto out;
}

static void __exit crypto_ccm_module_exit(void)
{
        crypto_unregister_template(&crypto_rfc4309_tmpl);
        crypto_unregister_template(&crypto_ccm_tmpl);
        crypto_unregister_template(&crypto_ccm_base_tmpl);
}

module_init(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Counter with CBC MAC");
MODULE_ALIAS("ccm_base");
MODULE_ALIAS("rfc4309");