blkcipher.c

/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"

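/*
 * Internal state flags for a blkcipher walk.  PHYS is set when the
 * caller asked for physical addresses (blkcipher_walk_phys()); SLOW
 * marks a step that goes through the aligned bounce buffer because a
 * block straddles a page or scatterlist entry; COPY marks a step that
 * is bounced through walk->page to satisfy the alignment mask; DIFF
 * means source and destination were mapped separately.
 */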
enum {
	BLKCIPHER_WALK_PHYS = 1 << 0,
	BLKCIPHER_WALK_SLOW = 1 << 1,
	BLKCIPHER_WALK_COPY = 1 << 2,
	BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr, 1);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
					       struct blkcipher_walk *walk,
					       unsigned int bsize)
{
	u8 *addr;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	addr = blkcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize, 1);
	return bsize;
}

static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
					       unsigned int n)
{
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		blkcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		blkcipher_unmap_dst(walk);
	} else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
		if (walk->flags & BLKCIPHER_WALK_DIFF)
			blkcipher_unmap_dst(walk);
		blkcipher_unmap_src(walk);
	}

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

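/*
 * Finish the current step of the walk: flush the bounce buffer or
 * unmap the pages, advance both scatterlists and, if data remains,
 * set up the next step.  @err is the number of bytes the caller left
 * unprocessed (or a negative error code).
 */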
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(tfm, walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

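/*
 * Slow path: the next block straddles a page or scatterlist entry, so
 * it is copied into an aligned bounce buffer, processed there and
 * written back by blkcipher_done_slow().  The buffer is sized so that
 * both the source and destination spots fit without crossing a page.
 */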
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk,
				      unsigned int bsize,
				      unsigned int alignmask)
{
	unsigned int n;
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

	if (walk->buffer)
		goto ok;

	walk->buffer = walk->page;
	if (walk->buffer)
		goto ok;

	n = aligned_bsize * 3 - (alignmask + 1) +
	    (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
	walk->buffer = kmalloc(n, GFP_ATOMIC);
	if (!walk->buffer)
		return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
	walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
					  alignmask + 1);
	walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
						 aligned_bsize, bsize);

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= BLKCIPHER_WALK_SLOW;

	return 0;
}

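/*
 * Copy path: the data is contiguous but fails the alignment mask, so
 * both directions go through the page-aligned walk->page buffer;
 * blkcipher_done_fast() copies the result back out afterwards.
 */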
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
	u8 *tmp = walk->page;

	blkcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	blkcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	return 0;
}

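/*
 * Fast path: map the source page directly and, when the destination
 * differs from it, map that separately; otherwise the cipher works
 * in place on the mapped page.
 */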
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
				      struct blkcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & BLKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	blkcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= BLKCIPHER_WALK_DIFF;
		blkcipher_map_dst(walk);
	}

	return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
			       struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
	unsigned int bsize;
	unsigned int n;
	int err;

	n = walk->total;
	if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
		desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return blkcipher_walk_done(desc, walk, -EINVAL);
	}

	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
			 BLKCIPHER_WALK_DIFF);
	if (!scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		walk->flags |= BLKCIPHER_WALK_COPY;
		if (!walk->page) {
			walk->page = (void *)__get_free_page(GFP_ATOMIC);
			if (!walk->page)
				n = 0;
		}
	}

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		err = blkcipher_next_slow(desc, walk, bsize, alignmask);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;
	if (walk->flags & BLKCIPHER_WALK_COPY) {
		err = blkcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
	if (walk->flags & BLKCIPHER_WALK_PHYS) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

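/*
 * Make a copy of the IV that satisfies the alignment mask and does
 * not straddle a page boundary.  The extra room reserved in front of
 * it doubles as the bounce buffer reused by blkcipher_next_slow().
 */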
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, bs) + aligned_bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

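/*
 * A sketch of how a chaining mode typically drives this walker
 * (modelled on in-tree users such as crypto/cbc.c; encrypt_segment()
 * is a hypothetical per-segment helper, not part of this file):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		encrypt_segment(desc, &walk);	(process walk.nbytes bytes)
 *		err = blkcipher_walk_done(desc, &walk, 0);
 *	}
 *	return err;
 */
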
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
				struct blkcipher_walk *walk)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = blkcipher_copy_iv(walk, tfm, alignmask);
		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);
	walk->page = NULL;

	return blkcipher_walk_next(desc, walk);
}

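/*
 * Like blkcipher_walk_virt(), but walks in units of @blocksize rather
 * than the cipher's own block size (used by modes such as CTR whose
 * algorithm block size differs from that of the underlying cipher).
 */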
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->blocksize = blocksize;
	return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

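/*
 * Bounce an unaligned key through a kmalloc'ed buffer that satisfies
 * the algorithm's alignment mask, and wipe the copy before freeing it.
 */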
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	struct blkcipher_desc desc = {
		.tfm = __crypto_blkcipher_cast(tfm),
		.info = req->info,
		.flags = req->base.flags,
	};

	return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

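/*
 * For synchronous users the context is over-allocated so that an
 * aligned IV can live directly behind the transform context; see
 * crypto_init_blkcipher_ops_sync() below for the matching layout.
 */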
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					     u32 mask)
{
	struct blkcipher_alg *cipher = &alg->cra_blkcipher;
	unsigned int len = alg->cra_ctxsize;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
	    cipher->ivsize) {
		len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
		len += cipher->ivsize;
	}

	return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	crt->setkey = async_setkey;
	crt->encrypt = async_encrypt;
	crt->decrypt = async_decrypt;
	if (!alg->ivsize) {
		crt->givencrypt = skcipher_null_givencrypt;
		crt->givdecrypt = skcipher_null_givdecrypt;
	}
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

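/*
 * Point crt->iv at the space reserved by crypto_blkcipher_ctxsize():
 * the first aligned address past the transform context.
 */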
static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
	struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
	unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
	unsigned long addr;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;

	addr = (unsigned long)crypto_tfm_ctx(tfm);
	addr = ALIGN(addr, align);
	addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
	crt->iv = (void *)addr;

	return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
		return crypto_init_blkcipher_ops_sync(tfm);
	else
		return crypto_init_blkcipher_ops_async(tfm);
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : blkcipher\n");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
	seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
	seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
	seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
					     "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
	.ctxsize = crypto_blkcipher_ctxsize,
	.init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
				 const char *name, u32 type, u32 mask)
{
	struct crypto_alg *alg;
	int err;

	type = crypto_skcipher_type(type);
	mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

	alg = crypto_alg_mod_lookup(name, type, mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
	crypto_mod_put(alg);
	return err;
}

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask)
{
	struct {
		int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int keylen);
		int (*encrypt)(struct ablkcipher_request *req);
		int (*decrypt)(struct ablkcipher_request *req);

		unsigned int min_keysize;
		unsigned int max_keysize;
		unsigned int ivsize;

		const char *geniv;
	} balg;
	const char *name;
	struct crypto_skcipher_spawn *spawn;
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	algt = crypto_get_attr_type(tb);
	err = PTR_ERR(algt);
	if (IS_ERR(algt))
		return ERR_PTR(err);

	if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
	    algt->mask)
		return ERR_PTR(-EINVAL);

	name = crypto_attr_alg_name(tb[1]);
	err = PTR_ERR(name);
	if (IS_ERR(name))
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);

	/* Ignore async algorithms if necessary. */
	mask |= crypto_requires_sync(algt->type, algt->mask);

	crypto_set_skcipher_spawn(spawn, inst);
	err = crypto_grab_nivcipher(spawn, name, type, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_skcipher_spawn_alg(spawn);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER) {
		balg.ivsize = alg->cra_blkcipher.ivsize;
		balg.min_keysize = alg->cra_blkcipher.min_keysize;
		balg.max_keysize = alg->cra_blkcipher.max_keysize;

		balg.setkey = async_setkey;
		balg.encrypt = async_encrypt;
		balg.decrypt = async_decrypt;

		balg.geniv = alg->cra_blkcipher.geniv;
	} else {
		balg.ivsize = alg->cra_ablkcipher.ivsize;
		balg.min_keysize = alg->cra_ablkcipher.min_keysize;
		balg.max_keysize = alg->cra_ablkcipher.max_keysize;

		balg.setkey = alg->cra_ablkcipher.setkey;
		balg.encrypt = alg->cra_ablkcipher.encrypt;
		balg.decrypt = alg->cra_ablkcipher.decrypt;

		balg.geniv = alg->cra_ablkcipher.geniv;
	}

	err = -EINVAL;
	if (!balg.ivsize)
		goto err_drop_alg;

	/*
	 * This is only true if we're constructing an algorithm with its
	 * default IV generator.  For the default generator we elide the
	 * template name and double-check the IV generator.
	 */
	if (algt->mask & CRYPTO_ALG_GENIV) {
		if (!balg.geniv)
			balg.geniv = crypto_default_geniv(alg);
		err = -EAGAIN;
		if (strcmp(tmpl->name, balg.geniv))
			goto err_drop_alg;

		memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
		memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
		       CRYPTO_MAX_ALG_NAME);
	} else {
		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
		if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, alg->cra_driver_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_drop_alg;
	}

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_givcipher_type;

	inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
	inst->alg.cra_ablkcipher.geniv = balg.geniv;

	inst->alg.cra_ablkcipher.setkey = balg.setkey;
	inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
	inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
	return inst;

err_drop_alg:
	crypto_drop_skcipher(spawn);
err_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
	crypto_drop_skcipher(crypto_instance_ctx(inst));
	kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_ablkcipher *cipher;

	cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tfm->crt_ablkcipher.base = cipher;
	tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

	return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
	crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");