/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN	128

static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;
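
/*
 * Dequeue the next pending request. Must be called with the engine lock
 * held; *backlog is set to the backlogged request to notify, if any.
 * Returns NULL when the queue is empty.
 */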
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	*backlog = crypto_get_backlog(&engine->queue);

	return crypto_dequeue_request(&engine->queue);
}
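
/*
 * If the engine is idle, pop the next queued request, make it the current
 * request, notify a backlogged request (if any) and start the transfer.
 */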
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}
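
/*
 * Let the request's backend process the new status: 0 means the request
 * is done and can be completed, -EINPROGRESS means another transfer step
 * has to be launched.
 */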
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}
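
/*
 * Requests chained on the TDMA engine are processed as a whole chain;
 * everything else goes through the standard per-request path.
 */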
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}

static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}
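
/*
 * Threaded interrupt handler: acknowledge and process the pending status
 * bits, complete finished requests and rearm the engine with the next
 * pending one.
 */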
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);
		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}
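
/*
 * Enqueue a request on the engine's software queue. DMA-backed requests
 * that were accepted (or backlogged) are also appended to the engine's
 * TDMA chain while the lock is held.
 */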
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS ||
	     (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}
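
/*
 * Register every cipher and hash algorithm supported by this variant,
 * unwinding the successful registrations on failure.
 */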
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_alg(cesa->caps->cipher_algs[i]);
}

static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
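
/*
 * A minimal, hypothetical device-tree node matching this driver; the
 * property and resource names follow the lookups performed in this file,
 * but the exact values are illustrative only:
 *
 *	crypto@90000 {
 *		compatible = "marvell,armada-370-crypto";
 *		reg = <0x90000 0x10000>;
 *		reg-names = "regs";
 *		interrupts = <48>;
 *		marvell,crypto-srams = <&crypto_sram>;
 *		marvell,crypto-sram-size = <0x800>;
 *	};
 */

/*
 * Program the TDMA address decoding windows so that the engine can reach
 * each DRAM chip-select through the MBus.
 */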
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}
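
/*
 * Create the managed DMA pools (TDMA descriptors, operation contexts,
 * cache and padding buffers) used when the TDMA engine is available.
 */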
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}
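
/*
 * Reserve the local SRAM used for crypto operations: preferably from the
 * "marvell,crypto-srams" genalloc pool, falling back to a plain "sram"
 * (or "sram0"/"sram1") memory resource.
 */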
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = phys_to_dma(cesa->dev,
				       (phys_addr_t)res->start);

	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (!engine->pool)
		return;

	gen_pool_free(engine->pool, (unsigned long)engine->sram,
		      cesa->sram_size);
}
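
/*
 * Probe: map the registers, set up the DMA pools, then bring up each
 * engine (SRAM, clocks, MBus windows, IRQ) before registering the
 * algorithms.
 */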
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}

static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}

static struct platform_driver marvell_cesa = {
	.probe = mv_cesa_probe,
	.remove = mv_cesa_remove,
	.driver = {
		.name = "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");