/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

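/*
 * Internal walk state flags: PHYS means the caller wants page/offset
 * pairs instead of kmapped virtual addresses; SLOW means the current
 * chunk went through the partial-block bounce buffer; COPY means the
 * chunk was staged in walk->page to satisfy the alignment mask; DIFF
 * means source and destination were mapped separately.
 */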
enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return max(start, end_page);
}

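/*
 * Slow-path completion: the block was processed in the aligned bounce
 * buffer, so copy the result back out into the destination scatterlist.
 */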
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
                                               struct blkcipher_walk *walk,
                                               unsigned int bsize)
{
        u8 *addr;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
        return bsize;
}

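/*
 * Fast-path completion: flush the copy buffer to the destination if one
 * was used, drop any mappings and advance both scatterlist walks.
 */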
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                                               unsigned int n)
{
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
                blkcipher_unmap_src(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

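/*
 * blkcipher_walk_done - finish one step of a blkcipher walk
 * @err: on success, the number of bytes at the end of this chunk that
 *       were left unprocessed by the cipher; negative on failure
 *
 * Returns zero once the walk is complete, a negative error code on
 * failure, or the result of setting up the next chunk if data remains.
 */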
int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int nbytes = 0;

#ifdef CONFIG_CRYPTO_FIPS
        if (unlikely(in_fips_err()))
                return -EACCES;
#endif

        if (likely(err >= 0)) {
                unsigned int n = walk->nbytes - err;

                if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
                        n = blkcipher_done_fast(walk, n);
                else if (WARN_ON(err)) {
                        err = -EINVAL;
                        goto err;
                } else
                        n = blkcipher_done_slow(tfm, walk, n);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

err:
        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }

        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

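/*
 * Slow path: the remaining data in the current scatterlist entries is
 * smaller than one cipher block, so gather a full block into an aligned
 * bounce buffer and point src/dst at spots inside it that do not cross
 * a page boundary.
 */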
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;
        unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = aligned_bsize * 3 - (alignmask + 1) +
            (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr +
                                                 aligned_bsize, bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}

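/*
 * Copy path: source or destination is misaligned, so stage the chunk in
 * walk->page and let the cipher work on it in place there.
 */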
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}

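/*
 * Fast path: operate directly on the scatterlist pages.  If source and
 * destination differ, map both and flag the walk BLKCIPHER_WALK_DIFF so
 * that completion unmaps them separately.
 */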
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
        unsigned int bsize;
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < crypto_blkcipher_blocksize(tfm))) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        bsize = min(walk->blocksize, n);
        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }

        return err;
}

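/*
 * The caller's IV violates the cipher's alignment mask, so duplicate it
 * into an aligned buffer.  The layout reserves two block-sized spots
 * ahead of the IV; blkcipher_next_slow() reuses this buffer for its
 * bounce block.
 */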
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
                                    struct crypto_blkcipher *tfm,
                                    unsigned int alignmask)
{
        unsigned bs = walk->blocksize;
        unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
        unsigned aligned_bs = ALIGN(bs, alignmask + 1);
        unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
                            (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, bs) + aligned_bs;
        iv = blkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

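/*
 * Typical caller pattern (an illustrative sketch only, not part of this
 * file; "bsize" stands for the cipher's block size and the inner loop
 * is cipher specific):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		... encrypt walk.nbytes - (walk.nbytes % bsize) bytes
 *		    from walk.src.virt.addr into walk.dst.virt.addr ...
 *		err = blkcipher_walk_done(desc, &walk, walk.nbytes % bsize);
 *	}
 *	return err;
 */
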
int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        walk->blocksize = crypto_blkcipher_blocksize(desc->tfm);
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

#ifdef CONFIG_CRYPTO_FIPS
        if (unlikely(in_fips_err()))
                return -EACCES;
#endif

        if (WARN_ON_ONCE(in_irq()))
                return -EDEADLK;

        walk->iv = desc->info;
        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = blkcipher_copy_iv(walk, tfm, alignmask);

                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}

int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        walk->blocksize = blocksize;
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);

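/*
 * Keys supplied at an address that violates the algorithm's alignment
 * mask are bounced through a temporary aligned buffer, which is wiped
 * before being freed.
 */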
static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_ATOMIC);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = cipher->setkey(tfm, alignbuffer, keylen);
        memset(alignbuffer, 0, keylen);
        kfree(buffer);
        return ret;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
        unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        if ((unsigned long)key & alignmask)
                return setkey_unaligned(tfm, key, keylen);

        return cipher->setkey(tfm, key, keylen);
}

static int async_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        return setkey(crypto_ablkcipher_tfm(tfm), key, keylen);
}

static int async_encrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

#ifdef CONFIG_CRYPTO_FIPS
        if (unlikely(in_fips_err()))
                return -EACCES;
#endif

        return alg->encrypt(&desc, req->dst, req->src, req->nbytes);
}

static int async_decrypt(struct ablkcipher_request *req)
{
        struct crypto_tfm *tfm = req->base.tfm;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        struct blkcipher_desc desc = {
                .tfm = __crypto_blkcipher_cast(tfm),
                .info = req->info,
                .flags = req->base.flags,
        };

#ifdef CONFIG_CRYPTO_FIPS
        if (unlikely(in_fips_err()))
                return -EACCES;
#endif

        return alg->decrypt(&desc, req->dst, req->src, req->nbytes);
}

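/*
 * For a synchronous blkcipher (i.e. both type bits requested in the
 * mask) with a nonzero IV size, reserve aligned space behind the tfm
 * context for the persistent IV that
 * crypto_init_blkcipher_ops_sync() points crt->iv at.
 */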
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
                                             u32 mask)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK &&
            cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}

static int crypto_init_blkcipher_ops_async(struct crypto_tfm *tfm)
{
        struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        crt->setkey = async_setkey;
        crt->encrypt = async_encrypt;
        crt->decrypt = async_decrypt;
        if (!alg->ivsize) {
                crt->givencrypt = skcipher_null_givencrypt;
                crt->givdecrypt = skcipher_null_givdecrypt;
        }
        crt->base = __crypto_ablkcipher_cast(tfm);
        crt->ivsize = alg->ivsize;

        return 0;
}

static int crypto_init_blkcipher_ops_sync(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}

static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        if ((mask & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_MASK)
                return crypto_init_blkcipher_ops_sync(tfm);
        else
                return crypto_init_blkcipher_ops_async(tfm);
}

#ifdef CONFIG_NET
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_blkcipher rblkcipher;

        strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
        strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
                sizeof(rblkcipher.geniv));

        rblkcipher.blocksize = alg->cra_blocksize;
        rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
        rblkcipher.ivsize = alg->cra_blkcipher.ivsize;

        NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
                sizeof(struct crypto_report_blkcipher), &rblkcipher);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
        seq_printf(m, "geniv        : %s\n", alg->cra_blkcipher.geniv ?:
                                             "<default>");
}

const struct crypto_type crypto_blkcipher_type = {
        .ctxsize = crypto_blkcipher_ctxsize,
        .init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_blkcipher_show,
#endif
        .report = crypto_blkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

static int crypto_grab_nivcipher(struct crypto_skcipher_spawn *spawn,
                                 const char *name, u32 type, u32 mask)
{
        struct crypto_alg *alg;
        int err;

        type = crypto_skcipher_type(type);
        mask = crypto_skcipher_mask(mask) | CRYPTO_ALG_GENIV;

        alg = crypto_alg_mod_lookup(name, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        err = crypto_init_spawn(&spawn->base, alg, spawn->base.inst, mask);
        crypto_mod_put(alg);
        return err;
}

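/*
 * Construct an instance of an IV-generator template wrapped around the
 * named block cipher.  The local balg struct normalises the sync
 * (blkcipher) and async (ablkcipher) cases into a single view before
 * the instance is filled in.
 */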
struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
                                             struct rtattr **tb, u32 type,
                                             u32 mask)
{
        struct {
                int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
                              unsigned int keylen);
                int (*encrypt)(struct ablkcipher_request *req);
                int (*decrypt)(struct ablkcipher_request *req);

                unsigned int min_keysize;
                unsigned int max_keysize;
                unsigned int ivsize;

                const char *geniv;
        } balg;
        const char *name;
        struct crypto_skcipher_spawn *spawn;
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        int err;

#ifdef CONFIG_CRYPTO_FIPS
        if (unlikely(in_fips_err()))
                return ERR_PTR(-EACCES);
#endif

        algt = crypto_get_attr_type(tb);
        err = PTR_ERR(algt);
        if (IS_ERR(algt))
                return ERR_PTR(err);

        if ((algt->type ^ (CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV)) &
            algt->mask)
                return ERR_PTR(-EINVAL);

        name = crypto_attr_alg_name(tb[1]);
        err = PTR_ERR(name);
        if (IS_ERR(name))
                return ERR_PTR(err);

        inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
        if (!inst)
                return ERR_PTR(-ENOMEM);

        spawn = crypto_instance_ctx(inst);

        /* Ignore async algorithms if necessary. */
        mask |= crypto_requires_sync(algt->type, algt->mask);

        crypto_set_skcipher_spawn(spawn, inst);
        err = crypto_grab_nivcipher(spawn, name, type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(spawn);

        if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
            CRYPTO_ALG_TYPE_BLKCIPHER) {
                balg.ivsize = alg->cra_blkcipher.ivsize;
                balg.min_keysize = alg->cra_blkcipher.min_keysize;
                balg.max_keysize = alg->cra_blkcipher.max_keysize;

                balg.setkey = async_setkey;
                balg.encrypt = async_encrypt;
                balg.decrypt = async_decrypt;

                balg.geniv = alg->cra_blkcipher.geniv;
        } else {
                balg.ivsize = alg->cra_ablkcipher.ivsize;
                balg.min_keysize = alg->cra_ablkcipher.min_keysize;
                balg.max_keysize = alg->cra_ablkcipher.max_keysize;

                balg.setkey = alg->cra_ablkcipher.setkey;
                balg.encrypt = alg->cra_ablkcipher.encrypt;
                balg.decrypt = alg->cra_ablkcipher.decrypt;

                balg.geniv = alg->cra_ablkcipher.geniv;
        }

        err = -EINVAL;
        if (!balg.ivsize)
                goto err_drop_alg;

        /*
         * This is only true if we're constructing an algorithm with its
         * default IV generator.  For the default generator we elide the
         * template name and double-check the IV generator.
         */
        if (algt->mask & CRYPTO_ALG_GENIV) {
                if (!balg.geniv)
                        balg.geniv = crypto_default_geniv(alg);
                err = -EAGAIN;
                if (strcmp(tmpl->name, balg.geniv))
                        goto err_drop_alg;

                memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
                memcpy(inst->alg.cra_driver_name, alg->cra_driver_name,
                       CRYPTO_MAX_ALG_NAME);
        } else {
                err = -ENAMETOOLONG;
                if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
                if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                             "%s(%s)", tmpl->name, alg->cra_driver_name) >=
                    CRYPTO_MAX_ALG_NAME)
                        goto err_drop_alg;
        }

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER | CRYPTO_ALG_GENIV;
        inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.cra_priority = alg->cra_priority;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;
        inst->alg.cra_type = &crypto_givcipher_type;

        inst->alg.cra_ablkcipher.ivsize = balg.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = balg.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = balg.max_keysize;
        inst->alg.cra_ablkcipher.geniv = balg.geniv;

        inst->alg.cra_ablkcipher.setkey = balg.setkey;
        inst->alg.cra_ablkcipher.encrypt = balg.encrypt;
        inst->alg.cra_ablkcipher.decrypt = balg.decrypt;

out:
        return inst;

err_drop_alg:
        crypto_drop_skcipher(spawn);
err_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_alloc);

void skcipher_geniv_free(struct crypto_instance *inst)
{
        crypto_drop_skcipher(crypto_instance_ctx(inst));
        kfree(inst);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_free);

int skcipher_geniv_init(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = (void *)tfm->__crt_alg;
        struct crypto_ablkcipher *cipher;

#ifdef CONFIG_CRYPTO_FIPS
        if (unlikely(in_fips_err()))
                return -EACCES;
#endif

        cipher = crypto_spawn_skcipher(crypto_instance_ctx(inst));
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        tfm->crt_ablkcipher.base = cipher;
        tfm->crt_ablkcipher.reqsize += crypto_ablkcipher_reqsize(cipher);

        return 0;
}
EXPORT_SYMBOL_GPL(skcipher_geniv_init);

void skcipher_geniv_exit(struct crypto_tfm *tfm)
{
        crypto_free_ablkcipher(tfm->crt_ablkcipher.base);
}
EXPORT_SYMBOL_GPL(skcipher_geniv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");