/* omap-aes-gcm.c */
  1. /*
  2. * Cryptographic API.
  3. *
  4. * Support for OMAP AES GCM HW acceleration.
  5. *
  6. * Copyright (c) 2016 Texas Instruments Incorporated
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as published
  10. * by the Free Software Foundation.
  11. *
  12. */
  13. #include <linux/errno.h>
  14. #include <linux/scatterlist.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/dmaengine.h>
  17. #include <linux/omap-dma.h>
  18. #include <linux/interrupt.h>
  19. #include <crypto/aes.h>
  20. #include <crypto/scatterwalk.h>
  21. #include <crypto/skcipher.h>
  22. #include <crypto/internal/aead.h>
  23. #include "omap-crypto.h"
  24. #include "omap-aes.h"
  25. static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
  26. struct aead_request *req);
  27. static void omap_aes_gcm_finish_req(struct omap_aes_dev *dd, int ret)
  28. {
  29. struct aead_request *req = dd->aead_req;
  30. dd->flags &= ~FLAGS_BUSY;
  31. dd->in_sg = NULL;
  32. dd->out_sg = NULL;
  33. req->base.complete(&req->base, ret);
  34. }
/*
 * Post-DMA completion handling for one GCM operation.
 *
 * Tears down the DMA mappings, copies the data and (for encryption)
 * the computed tag back to the caller's destination, frees the bounce
 * buffers set up by omap_aes_gcm_copy_buffers(), and for decryption
 * checks the pre-XORed authentication tag before completing the
 * request and kicking the queue again.
 */
static void omap_aes_gcm_done_task(struct omap_aes_dev *dd)
{
	u8 *tag;
	int alen, clen, i, ret = 0, nsg;
	struct omap_aes_reqctx *rctx;

	/* The HW consumed AES_BLOCK_SIZE-padded lengths */
	alen = ALIGN(dd->assoc_len, AES_BLOCK_SIZE);
	clen = ALIGN(dd->total, AES_BLOCK_SIZE);
	rctx = aead_request_ctx(dd->aead_req);

	/* Payload lives in in_sgl[1] only if assoc data was also present */
	nsg = !!(dd->assoc_len && dd->total);

	dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
			       DMA_FROM_DEVICE);
	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
	omap_aes_crypt_dma_stop(dd);

	/* Copy the aligned output back into the caller's destination sg */
	omap_crypto_cleanup(dd->out_sg, dd->orig_out,
			    dd->aead_req->assoclen, dd->total,
			    FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

	/* On encrypt, append the computed tag after the ciphertext */
	if (dd->flags & FLAGS_ENCRYPT)
		scatterwalk_map_and_copy(rctx->auth_tag,
					 dd->aead_req->dst,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 1);

	/* Release the assoc-data and payload bounce buffers */
	omap_crypto_cleanup(&dd->in_sgl[0], NULL, 0, alen,
			    FLAGS_ASSOC_DATA_ST_SHIFT, dd->flags);
	omap_crypto_cleanup(&dd->in_sgl[nsg], NULL, 0, clen,
			    FLAGS_IN_DATA_ST_SHIFT, dd->flags);

	/*
	 * On decrypt, auth_tag was XORed with the expected tag in
	 * omap_aes_gcm_dma_out_callback(), so any nonzero byte means
	 * the tags differ.
	 */
	if (!(dd->flags & FLAGS_ENCRYPT)) {
		tag = (u8 *)rctx->auth_tag;
		for (i = 0; i < dd->authsize; i++) {
			if (tag[i]) {
				dev_err(dd->dev, "GCM decryption: Tag Message is wrong\n");
				ret = -EBADMSG;
			}
		}
	}

	omap_aes_gcm_finish_req(dd, ret);
	omap_aes_gcm_handle_queue(dd, NULL);
}
  73. static int omap_aes_gcm_copy_buffers(struct omap_aes_dev *dd,
  74. struct aead_request *req)
  75. {
  76. int alen, clen, cryptlen, assoclen, ret;
  77. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  78. unsigned int authlen = crypto_aead_authsize(aead);
  79. struct scatterlist *tmp, sg_arr[2];
  80. int nsg;
  81. u16 flags;
  82. assoclen = req->assoclen;
  83. cryptlen = req->cryptlen;
  84. if (dd->flags & FLAGS_RFC4106_GCM)
  85. assoclen -= 8;
  86. if (!(dd->flags & FLAGS_ENCRYPT))
  87. cryptlen -= authlen;
  88. alen = ALIGN(assoclen, AES_BLOCK_SIZE);
  89. clen = ALIGN(cryptlen, AES_BLOCK_SIZE);
  90. nsg = !!(assoclen && cryptlen);
  91. omap_aes_clear_copy_flags(dd);
  92. sg_init_table(dd->in_sgl, nsg + 1);
  93. if (assoclen) {
  94. tmp = req->src;
  95. ret = omap_crypto_align_sg(&tmp, assoclen,
  96. AES_BLOCK_SIZE, dd->in_sgl,
  97. OMAP_CRYPTO_COPY_DATA |
  98. OMAP_CRYPTO_ZERO_BUF |
  99. OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
  100. FLAGS_ASSOC_DATA_ST_SHIFT,
  101. &dd->flags);
  102. }
  103. if (cryptlen) {
  104. tmp = scatterwalk_ffwd(sg_arr, req->src, req->assoclen);
  105. ret = omap_crypto_align_sg(&tmp, cryptlen,
  106. AES_BLOCK_SIZE, &dd->in_sgl[nsg],
  107. OMAP_CRYPTO_COPY_DATA |
  108. OMAP_CRYPTO_ZERO_BUF |
  109. OMAP_CRYPTO_FORCE_SINGLE_ENTRY,
  110. FLAGS_IN_DATA_ST_SHIFT,
  111. &dd->flags);
  112. }
  113. dd->in_sg = dd->in_sgl;
  114. dd->total = cryptlen;
  115. dd->assoc_len = assoclen;
  116. dd->authsize = authlen;
  117. dd->out_sg = req->dst;
  118. dd->orig_out = req->dst;
  119. dd->out_sg = scatterwalk_ffwd(sg_arr, req->dst, assoclen);
  120. flags = 0;
  121. if (req->src == req->dst || dd->out_sg == sg_arr)
  122. flags |= OMAP_CRYPTO_FORCE_COPY;
  123. ret = omap_crypto_align_sg(&dd->out_sg, cryptlen,
  124. AES_BLOCK_SIZE, &dd->out_sgl,
  125. flags,
  126. FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
  127. if (ret)
  128. return ret;
  129. dd->in_sg_len = sg_nents_for_len(dd->in_sg, alen + clen);
  130. dd->out_sg_len = sg_nents_for_len(dd->out_sg, clen);
  131. return 0;
  132. }
  133. static void omap_aes_gcm_complete(struct crypto_async_request *req, int err)
  134. {
  135. struct omap_aes_gcm_result *res = req->data;
  136. if (err == -EINPROGRESS)
  137. return;
  138. res->err = err;
  139. complete(&res->completion);
  140. }
  141. static int do_encrypt_iv(struct aead_request *req, u32 *tag, u32 *iv)
  142. {
  143. struct scatterlist iv_sg, tag_sg;
  144. struct skcipher_request *sk_req;
  145. struct omap_aes_gcm_result result;
  146. struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
  147. int ret = 0;
  148. sk_req = skcipher_request_alloc(ctx->ctr, GFP_KERNEL);
  149. if (!sk_req) {
  150. pr_err("skcipher: Failed to allocate request\n");
  151. return -1;
  152. }
  153. init_completion(&result.completion);
  154. sg_init_one(&iv_sg, iv, AES_BLOCK_SIZE);
  155. sg_init_one(&tag_sg, tag, AES_BLOCK_SIZE);
  156. skcipher_request_set_callback(sk_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
  157. omap_aes_gcm_complete, &result);
  158. ret = crypto_skcipher_setkey(ctx->ctr, (u8 *)ctx->key, ctx->keylen);
  159. skcipher_request_set_crypt(sk_req, &iv_sg, &tag_sg, AES_BLOCK_SIZE,
  160. NULL);
  161. ret = crypto_skcipher_encrypt(sk_req);
  162. switch (ret) {
  163. case 0:
  164. break;
  165. case -EINPROGRESS:
  166. case -EBUSY:
  167. ret = wait_for_completion_interruptible(&result.completion);
  168. if (!ret) {
  169. ret = result.err;
  170. if (!ret) {
  171. reinit_completion(&result.completion);
  172. break;
  173. }
  174. }
  175. /* fall through */
  176. default:
  177. pr_err("Encryption of IV failed for GCM mode");
  178. break;
  179. }
  180. skcipher_request_free(sk_req);
  181. return ret;
  182. }
/*
 * DMA completion callback for the output channel.
 *
 * Reads the GHASH result from the HW TAG registers and XORs it into
 * auth_tag, which already holds E(K, J0) from do_encrypt_iv() — the
 * result is the final GCM tag. On decryption the tag received after
 * the ciphertext is XORed in as well, so a matching tag leaves
 * auth_tag all-zero, which omap_aes_gcm_done_task() then checks.
 */
void omap_aes_gcm_dma_out_callback(void *data)
{
	struct omap_aes_dev *dd = data;
	struct omap_aes_reqctx *rctx;
	int i, val;
	u32 *auth_tag, tag[4];

	/* On decrypt, fetch the expected tag trailing the ciphertext */
	if (!(dd->flags & FLAGS_ENCRYPT))
		scatterwalk_map_and_copy(tag, dd->aead_req->src,
					 dd->total + dd->aead_req->assoclen,
					 dd->authsize, 0);

	rctx = aead_request_ctx(dd->aead_req);
	auth_tag = (u32 *)rctx->auth_tag;
	for (i = 0; i < 4; i++) {
		val = omap_aes_read(dd, AES_REG_TAG_N(dd, i));
		auth_tag[i] = val ^ auth_tag[i];
		if (!(dd->flags & FLAGS_ENCRYPT))
			auth_tag[i] = auth_tag[i] ^ tag[i];
	}

	omap_aes_gcm_done_task(dd);
}
  203. static int omap_aes_gcm_handle_queue(struct omap_aes_dev *dd,
  204. struct aead_request *req)
  205. {
  206. struct omap_aes_ctx *ctx;
  207. struct aead_request *backlog;
  208. struct omap_aes_reqctx *rctx;
  209. unsigned long flags;
  210. int err, ret = 0;
  211. spin_lock_irqsave(&dd->lock, flags);
  212. if (req)
  213. ret = aead_enqueue_request(&dd->aead_queue, req);
  214. if (dd->flags & FLAGS_BUSY) {
  215. spin_unlock_irqrestore(&dd->lock, flags);
  216. return ret;
  217. }
  218. backlog = aead_get_backlog(&dd->aead_queue);
  219. req = aead_dequeue_request(&dd->aead_queue);
  220. if (req)
  221. dd->flags |= FLAGS_BUSY;
  222. spin_unlock_irqrestore(&dd->lock, flags);
  223. if (!req)
  224. return ret;
  225. if (backlog)
  226. backlog->base.complete(&backlog->base, -EINPROGRESS);
  227. ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
  228. rctx = aead_request_ctx(req);
  229. dd->ctx = ctx;
  230. rctx->dd = dd;
  231. dd->aead_req = req;
  232. rctx->mode &= FLAGS_MODE_MASK;
  233. dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
  234. err = omap_aes_gcm_copy_buffers(dd, req);
  235. if (err)
  236. return err;
  237. err = omap_aes_write_ctrl(dd);
  238. if (!err)
  239. err = omap_aes_crypt_dma_start(dd);
  240. if (err) {
  241. omap_aes_gcm_finish_req(dd, err);
  242. omap_aes_gcm_handle_queue(dd, NULL);
  243. }
  244. return ret;
  245. }
  246. static int omap_aes_gcm_crypt(struct aead_request *req, unsigned long mode)
  247. {
  248. struct omap_aes_reqctx *rctx = aead_request_ctx(req);
  249. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  250. unsigned int authlen = crypto_aead_authsize(aead);
  251. struct omap_aes_dev *dd;
  252. __be32 counter = cpu_to_be32(1);
  253. int err, assoclen;
  254. memset(rctx->auth_tag, 0, sizeof(rctx->auth_tag));
  255. memcpy(rctx->iv + 12, &counter, 4);
  256. err = do_encrypt_iv(req, (u32 *)rctx->auth_tag, (u32 *)rctx->iv);
  257. if (err)
  258. return err;
  259. if (mode & FLAGS_RFC4106_GCM)
  260. assoclen = req->assoclen - 8;
  261. else
  262. assoclen = req->assoclen;
  263. if (assoclen + req->cryptlen == 0) {
  264. scatterwalk_map_and_copy(rctx->auth_tag, req->dst, 0, authlen,
  265. 1);
  266. return 0;
  267. }
  268. dd = omap_aes_find_dev(rctx);
  269. if (!dd)
  270. return -ENODEV;
  271. rctx->mode = mode;
  272. return omap_aes_gcm_handle_queue(dd, req);
  273. }
  274. int omap_aes_gcm_encrypt(struct aead_request *req)
  275. {
  276. struct omap_aes_reqctx *rctx = aead_request_ctx(req);
  277. memcpy(rctx->iv, req->iv, 12);
  278. return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM);
  279. }
  280. int omap_aes_gcm_decrypt(struct aead_request *req)
  281. {
  282. struct omap_aes_reqctx *rctx = aead_request_ctx(req);
  283. memcpy(rctx->iv, req->iv, 12);
  284. return omap_aes_gcm_crypt(req, FLAGS_GCM);
  285. }
  286. int omap_aes_4106gcm_encrypt(struct aead_request *req)
  287. {
  288. struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
  289. struct omap_aes_reqctx *rctx = aead_request_ctx(req);
  290. memcpy(rctx->iv, ctx->nonce, 4);
  291. memcpy(rctx->iv + 4, req->iv, 8);
  292. return omap_aes_gcm_crypt(req, FLAGS_ENCRYPT | FLAGS_GCM |
  293. FLAGS_RFC4106_GCM);
  294. }
  295. int omap_aes_4106gcm_decrypt(struct aead_request *req)
  296. {
  297. struct omap_aes_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
  298. struct omap_aes_reqctx *rctx = aead_request_ctx(req);
  299. memcpy(rctx->iv, ctx->nonce, 4);
  300. memcpy(rctx->iv + 4, req->iv, 8);
  301. return omap_aes_gcm_crypt(req, FLAGS_GCM | FLAGS_RFC4106_GCM);
  302. }
  303. int omap_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
  304. unsigned int keylen)
  305. {
  306. struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
  307. if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
  308. keylen != AES_KEYSIZE_256)
  309. return -EINVAL;
  310. memcpy(ctx->key, key, keylen);
  311. ctx->keylen = keylen;
  312. return 0;
  313. }
  314. int omap_aes_4106gcm_setkey(struct crypto_aead *tfm, const u8 *key,
  315. unsigned int keylen)
  316. {
  317. struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
  318. if (keylen < 4)
  319. return -EINVAL;
  320. keylen -= 4;
  321. if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
  322. keylen != AES_KEYSIZE_256)
  323. return -EINVAL;
  324. memcpy(ctx->key, key, keylen);
  325. memcpy(ctx->nonce, key + keylen, 4);
  326. ctx->keylen = keylen;
  327. return 0;
  328. }