ahash.c

/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

/*
 * State saved while an unaligned result buffer is temporarily replaced
 * with an aligned bounce buffer; see ahash_save_req()/ahash_restore_req().
 */
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}
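
/*
 * Example (illustrative, not part of this file's API surface): the
 * canonical consumer loop for the walk helpers above, modeled on the
 * async wrappers in crypto/shash.c.  The shash descriptor "desc" is a
 * hypothetical stand-in for whatever hashes each mapped chunk.
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 *
 * Each iteration sees one mapped, alignment-trimmed chunk at walk.data;
 * feeding the chunk's return code back into crypto_hash_walk_done()
 * both propagates errors and advances the walk.
 */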
/*
 * Bounce an unaligned key through a kmalloc'd buffer that satisfies the
 * transform's alignment mask before handing it to ->setkey().
 */
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
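
/*
 * Example (a minimal sketch): keying a keyed hash such as "hmac(sha256)".
 * The key buffer need not honour the alignment mask; unaligned keys are
 * bounced through ahash_setkey_unaligned() above.
 *
 *	err = crypto_ahash_setkey(tfm, key, keylen);
 *	if (err)
 *		return err;
 */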
static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

/*
 * Worst-case slack needed to align a CRYPTO_MINALIGN-aligned buffer up
 * to (mask + 1) bytes, e.g. mask = 63 with an 8-byte minimum alignment
 * yields 56 bytes of padding on top of len.
 */
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
/*
 * Swap the caller's result buffer and completion callback for aligned,
 * request-owned replacements, stashing the originals in req->priv.
 */
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;
	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}
static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	ahash_restore_req(req, err);

	return err;
}

/*
 * Run one ahash operation, detouring through an aligned bounce buffer
 * when the caller's result pointer violates the alignment mask.
 */
static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}
int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
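
/*
 * Example (a minimal sketch, names illustrative): one-shot hashing over
 * a scatterlist via the exported ahash entry points.  "my_done",
 * "my_ctx", "sg", "digest" and "nbytes" are hypothetical.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done, &my_ctx);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *
 *	err = crypto_ahash_digest(req);
 *	if (err == -EINPROGRESS || err == -EBUSY)
 *		;	// completion is reported through my_done()
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */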
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

/*
 * Default ->finup() for algorithms that only provide ->update() and
 * ->final(): chain the two, with ahash_def_finup_done1/_done2 picking
 * up after each stage if it completes asynchronously.
 */
static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS ||
	    (err == -EBUSY && (ahash_request_flags(req) &
			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
		return err;

	return ahash_def_finup_finish1(req, err);
}
static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	/* Algorithms registered via the shash interface get async wrappers. */
	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}
#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH,
		sizeof(struct crypto_report_hash), &rhash);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}
const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
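
/*
 * Example (a minimal sketch): how a driver might register an ahash
 * implementation.  Every "foo_*" name is hypothetical; note that
 * ahash_prepare_alg() above rejects a zero statesize, and fills in
 * cra_type and CRYPTO_ALG_TYPE_AHASH itself.
 *
 *	static struct ahash_alg foo_sha256 = {
 *		.init	= foo_init,
 *		.update	= foo_update,
 *		.final	= foo_final,
 *		.digest	= foo_digest,
 *		.halg	= {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct foo_state),
 *			.base	    = {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-foo",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = SHA256_BLOCK_SIZE,
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&foo_sha256);
 */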
int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

#ifdef CONFIG_CRYPTO_FIPS
	if (unlikely(in_fips_err()))
		return -EACCES;
#endif

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");