/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>	/* usleep_range(), in case the header doesn't pull it in */
#include <linux/reset.h>

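/*
 * Enable the four clocks feeding the crypto block (sclk, aclk, hclk,
 * dmaclk) in order; on failure, unwind the ones already enabled in
 * reverse order.
 */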
static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;

err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}

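/*
 * A scatterlist entry is usable for zero-copy DMA when its offset is
 * 4-byte aligned and its length is a multiple of align_mask (the
 * per-algorithm alignment requirement); for src/dst pairs the lengths
 * must also match.
 */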
static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}

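/*
 * Prepare the next chunk for DMA. If every scatterlist entry seen so
 * far is suitably aligned, map the caller's buffers directly;
 * otherwise fall back to bounce-copying up to PAGE_SIZE bytes into
 * dev->addr_vir and mapping that. Note that dev->aligned is sticky:
 * once one entry fails the alignment check, all remaining chunks of
 * the request take the copy path.
 */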
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}

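/* Undo the DMA mappings set up by rk_load_data() for the current chunk. */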
static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}

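/*
 * Interrupt handler: read and acknowledge the interrupt status, then
 * either report a DMA error or kick the per-algorithm update hook to
 * feed the next chunk to the hardware.
 */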
static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;
	int err = 0;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
	if (interrupt_status & 0x0a) {
		/* 0x0a: presumably the cipher/hash DMA error interrupt bits */
		dev_warn(dev->dev, "DMA Error\n");
		err = -EFAULT;
	} else if (interrupt_status & 0x05) {
		/* 0x05: presumably the cipher/hash DMA done interrupt bits */
		err = dev->update(dev);
	}
	if (err)
		dev->complete(dev, err);
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

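/*
 * Tasklet: dequeue the next request, notify any backlogged request
 * that it is now in progress, and start the hardware on the dequeued
 * cipher or hash request.
 */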
static void rk_crypto_tasklet_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!async_req) {
		dev_err(dev->dev, "async_req is NULL !!\n");
		return;
	}
	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
		dev->ablk_req = ablkcipher_request_cast(async_req);
	else
		dev->ahash_req = ahash_request_cast(async_req);
	err = dev->start(dev);
	if (err)
		dev->complete(dev, err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};

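/*
 * Register every supported algorithm with the crypto API; on failure,
 * unregister the ones that were already registered.
 */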
static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(
					&rk_cipher_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}

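/* devm action: put the crypto block back into reset on device removal. */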
static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}

static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

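/*
 * Probe: pulse the reset line, map the registers, look up the clocks
 * and the IRQ, then register the algorithms backed by this device.
 */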
static int rk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}
	/* Set early so error messages below have a valid struct device. */
	crypto_info->dev = &pdev->dev;

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(crypto_info->dev,
			 "control Interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);
	if (err) {
		dev_err(crypto_info->dev, "irq request failed.\n");
		goto err_crypto;
	}

	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->crypto_tasklet,
		     rk_crypto_tasklet_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "err in register alg\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->crypto_tasklet);
err_crypto:
	return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->crypto_tasklet);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");