ahash.c

/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>

#include "internal.h"

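/*
 * Private state hung off an ahash_request while the core services an
 * unaligned result buffer: the caller's completion callback, context
 * pointer and result pointer are stashed here, and ubuf[] provides the
 * aligned bounce buffer that temporarily replaces req->result.
 */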
struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

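/*
 * Map the current page of the walk and clamp the step size so it never
 * crosses the page boundary and, when the start is misaligned, so the
 * next step begins on an alignmask boundary.
 */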
static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = crypto_kmap(walk->pg, 0);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

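/*
 * Advance the walk after the caller has hashed the chunk returned by the
 * previous step.  Returns the size of the next chunk, 0 once the walk is
 * complete, or a negative errno.  If the previous chunk stopped short of
 * the page because of alignment, continue within the same mapping;
 * otherwise unmap the page and move on to the next page or scatterlist
 * entry.
 */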
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	crypto_kunmap(walk->data, 0);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = scatterwalk_sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

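/*
 * Begin a hash walk over req->src.  Returns the size of the first chunk
 * to hash (0 if there is no data); walk->data then points at the mapped
 * chunk until the matching crypto_hash_walk_done() call.
 */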
int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
				  struct crypto_hash_walk *walk,
				  struct scatterlist *sg, unsigned int len)
{
	walk->total = len;

	if (!walk->total)
		return 0;

	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
	walk->sg = sg;
	walk->flags = hdesc->flags;

	return hash_walk_new_entry(walk);
}

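/*
 * The key does not satisfy the driver's alignment requirement: copy it
 * into a freshly allocated buffer aligned to alignmask + 1, run setkey
 * on the copy, then wipe and free the copy.
 */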
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_op_unaligned_finish(areq, err);

	complete(data, err);
}

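/*
 * req->result does not satisfy the algorithm's alignment requirement:
 * allocate a private bounce buffer, swap it in as req->result, redirect
 * the completion callback so the digest can be copied back to the
 * caller's buffer once the operation finishes, then invoke the op.
 */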
static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;
	int err;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_op_unaligned_done;
	req->base.data = req;
	req->priv = priv;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

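/*
 * Default finup implementation for algorithms that only provide update
 * and final: run update, then chain into final.  The *_finish2/*_done2
 * helpers copy the digest back and release the private state; the
 * *_finish1/*_done1 helpers handle completion of the update step and
 * kick off final.
 */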
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	kzfree(priv);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	ahash_def_finup_finish2(areq, err);

	complete(data, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;
	struct ahash_request_priv *priv = areq->priv;
	crypto_completion_t complete = priv->complete;
	void *data = priv->data;

	err = ahash_def_finup_finish1(areq, err);

	complete(data, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = ahash_def_finup_done1;
	req->base.data = req;
	req->priv = priv;

	return ahash_def_finup_finish1(req, tfm->update(req));
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

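/*
 * Wire up the per-transform operation pointers.  Algorithms registered
 * through a legacy (non-ahash) type are handled by wrapping the
 * synchronous shash interface; native ahash algorithms get their ops
 * copied directly, with fallbacks for setkey/export/import/finup when
 * the algorithm leaves them unset.
 */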
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey)
		hash->setkey = alg->setkey;
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

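/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * caller allocates the transform and a request, supplies a completion
 * callback, and treats -EINPROGRESS as "the callback will fire later".
 * The callback and variable names below are hypothetical.
 *
 *	static void my_hash_done(struct crypto_async_request *req, int err)
 *	{
 *		complete(req->data);	// wake a struct completion
 *	}
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_hash_done, &done);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);	// may return -EINPROGRESS
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */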
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");