/*
 * drivers/crypto/tegra-aes.c
 *
 * Driver for the NVIDIA Tegra AES hardware engine residing inside the
 * Bit Stream Engine for Video (BSEV) hardware block.
 *
 * The engine is programmed through commands that travel via a command
 * queue residing between the CPU and the BSEV block. The BSEV engine
 * has an internal RAM (VRAM) where the final input plaintext, keys and
 * the IV have to be copied before starting the encrypt/decrypt
 * operation.
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

#include <mach/clk.h>

#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/internal/rng.h>

#include "tegra-aes.h"
#define FLAGS_MODE_MASK		0x00FF
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_GIV		BIT(2)
#define FLAGS_RNG		BIT(3)
#define FLAGS_OFB		BIT(4)
#define FLAGS_NEW_KEY		BIT(5)
#define FLAGS_NEW_IV		BIT(6)
#define FLAGS_INIT		BIT(7)
#define FLAGS_FAST		BIT(8)
#define FLAGS_BUSY		9
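/*
 * Note: unlike the mask-style flags above, FLAGS_BUSY is a bit *number*;
 * it is only ever used with test_bit()/test_and_set_bit()/clear_bit()
 * on dd->flags.
 */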
/*
 * Defines the maximum number of bytes the AES engine processes in one
 * go, sized so that one pass takes about 1 msec. The engine spends
 * roughly 176 cycles per 16-byte block, i.e. 11 cycles/byte. The CPU
 * may occupy the BSE for at most 1 msec, during which the AVP/BSE has
 * about 216K cycles available, enough to process 216K/11 ~= 19KB.
 * Based on this, AES_HW_DMA_BUFFER_SIZE_BYTES is configured to 16KB.
 */
#define AES_HW_DMA_BUFFER_SIZE_BYTES 0x4000
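/*
 * Worked out: 16KB * 11 cycles/byte ~= 180K cycles, which fits the
 * ~216K cycle (1 msec) budget, and 16KB divides evenly into 1024
 * 16-byte AES blocks.
 */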
/*
 * The key table length is 64 bytes
 * (This includes first up to 32 bytes key + 16 bytes original initial vector
 * and 16 bytes updated initial vector)
 */
#define AES_HW_KEY_TABLE_LENGTH_BYTES 64
/*
 * The memory being used is divided as follows:
 * 1. Key - 32 bytes
 * 2. Original IV - 16 bytes
 * 3. Updated IV - 16 bytes
 * 4. Key schedule - 256 bytes
 *
 * 1+2+3 constitute the hw key table.
 */
#define AES_HW_IV_SIZE 16
#define AES_HW_KEYSCHEDULE_LEN 256
#define AES_IVKEY_SIZE (AES_HW_KEY_TABLE_LENGTH_BYTES + AES_HW_KEYSCHEDULE_LEN)
/* Define commands required for AES operation */
enum {
        CMD_BLKSTARTENGINE = 0x0E,
        CMD_DMASETUP = 0x10,
        CMD_DMACOMPLETE = 0x11,
        CMD_SETTABLE = 0x15,
        CMD_MEMDMAVD = 0x22,
};

/* Define sub-commands */
enum {
        SUBCMD_VRAM_SEL = 0x1,
        SUBCMD_CRYPTO_TABLE_SEL = 0x3,
        SUBCMD_KEY_TABLE_SEL = 0x8,
};
/* memdma_vd command */
#define MEMDMA_DIR_DTOVRAM	0 /* sdram -> vram */
#define MEMDMA_DIR_VTODRAM	1 /* vram -> sdram */
#define MEMDMA_DIR_SHIFT	25
#define MEMDMA_NUM_WORDS_SHIFT	12

/* command queue bit shifts */
enum {
        CMDQ_KEYTABLEADDR_SHIFT = 0,
        CMDQ_KEYTABLEID_SHIFT = 17,
        CMDQ_VRAMSEL_SHIFT = 23,
        CMDQ_TABLESEL_SHIFT = 24,
        CMDQ_OPCODE_SHIFT = 26,
};
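/*
 * Illustration (not used directly): a SETTABLE command that loads key
 * slot 2 from VRAM would be composed as
 *
 *	CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
 *	SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
 *	SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
 *	(SUBCMD_KEY_TABLE_SEL | 2) << CMDQ_KEYTABLEID_SHIFT;
 *
 * aes_set_key() below builds exactly this word for the slot in use.
 */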
/*
 * The secure key slot contains a unique secure key generated
 * and loaded by the bootloader. This slot is marked as non-accessible
 * to the kernel.
 */
#define SSK_SLOT_NUM		4

#define AES_NR_KEYSLOTS		8
#define TEGRA_AES_QUEUE_LENGTH	50
#define DEFAULT_RNG_BLK_SZ	16
/*
 * The command queue depth: four words are queued per operation (DMA
 * setup, its source-address operand, block-engine start, and DMA
 * complete). aes_start_crypt() pushes all but the final DMACOMPLETE
 * word up front and the last one after the operation completes.
 */
#define AES_HW_MAX_ICQ_LENGTH	4
struct tegra_aes_slot {
        struct list_head node;
        int slot_num;
};

static struct tegra_aes_slot ssk = {
        .slot_num = SSK_SLOT_NUM,
};

struct tegra_aes_reqctx {
        unsigned long mode;
};

struct tegra_aes_dev {
        struct device *dev;
        void __iomem *io_base;
        dma_addr_t ivkey_phys_base;
        void __iomem *ivkey_base;
        struct clk *aes_clk;
        struct tegra_aes_ctx *ctx;
        int irq;
        unsigned long flags;
        struct completion op_complete;
        u32 *buf_in;
        dma_addr_t dma_buf_in;
        u32 *buf_out;
        dma_addr_t dma_buf_out;
        u8 *iv;
        u8 dt[DEFAULT_RNG_BLK_SZ];
        int ivlen;
        u64 ctr;
        spinlock_t lock;
        struct crypto_queue queue;
        struct tegra_aes_slot *slots;
        struct ablkcipher_request *req;
        size_t total;
        struct scatterlist *in_sg;
        size_t in_offset;
        struct scatterlist *out_sg;
        size_t out_offset;
};

static struct tegra_aes_dev *aes_dev;

struct tegra_aes_ctx {
        struct tegra_aes_dev *dd;
        unsigned long flags;
        struct tegra_aes_slot *slot;
        u8 key[AES_MAX_KEY_SIZE];
        size_t keylen;
};

static struct tegra_aes_ctx rng_ctx = {
        .flags = FLAGS_NEW_KEY,
        .keylen = AES_KEYSIZE_128,
};
/* keep registered devices data here */
static LIST_HEAD(dev_list);	/* statically initialized so early probe-error paths are safe */
static DEFINE_SPINLOCK(list_lock);
static DEFINE_MUTEX(aes_lock);

static void aes_workqueue_handler(struct work_struct *work);
static DECLARE_WORK(aes_work, aes_workqueue_handler);
static struct workqueue_struct *aes_wq;

extern unsigned long long tegra_chip_uid(void);
static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
{
        return readl(dd->io_base + offset);
}

static inline void aes_writel(struct tegra_aes_dev *dd, u32 val, u32 offset)
{
        writel(val, dd->io_base + offset);
}
static int aes_start_crypt(struct tegra_aes_dev *dd, u32 in_addr, u32 out_addr,
                           int nblocks, int mode, bool upd_iv)
{
        u32 cmdq[AES_HW_MAX_ICQ_LENGTH];
        int i, eng_busy, icq_empty, ret;
        u32 value;

        /* reset all the interrupt bits */
        aes_writel(dd, 0xFFFFFFFF, TEGRA_AES_INTR_STATUS);

        /* enable error, dma xfer complete interrupts */
        aes_writel(dd, 0x33, TEGRA_AES_INT_ENB);
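        /*
         * Assemble the command queue: DMA setup plus its source-address
         * operand, engine start for nblocks, and DMA complete. All but
         * the final DMACOMPLETE word are pushed into the ICQ below; the
         * last one is written only after the completion interrupt fires.
         */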
        cmdq[0] = CMD_DMASETUP << CMDQ_OPCODE_SHIFT;
        cmdq[1] = in_addr;
        cmdq[2] = CMD_BLKSTARTENGINE << CMDQ_OPCODE_SHIFT | (nblocks-1);
        cmdq[3] = CMD_DMACOMPLETE << CMDQ_OPCODE_SHIFT;

        value = aes_readl(dd, TEGRA_AES_CMDQUE_CONTROL);
        /* access SDRAM through AHB */
        value &= ~TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD;
        value &= ~TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD;
        value |= TEGRA_AES_CMDQ_CTRL_SRC_STM_SEL_FIELD |
                 TEGRA_AES_CMDQ_CTRL_DST_STM_SEL_FIELD |
                 TEGRA_AES_CMDQ_CTRL_ICMDQEN_FIELD;
        aes_writel(dd, value, TEGRA_AES_CMDQUE_CONTROL);
        dev_dbg(dd->dev, "cmd_q_ctrl=0x%x", value);

        value = (0x1 << TEGRA_AES_SECURE_INPUT_ALG_SEL_SHIFT) |
                ((dd->ctx->keylen * 8) <<
                        TEGRA_AES_SECURE_INPUT_KEY_LEN_SHIFT) |
                ((u32)upd_iv << TEGRA_AES_SECURE_IV_SELECT_SHIFT);
        if (mode & FLAGS_CBC) {
                value |= ((((mode & FLAGS_ENCRYPT) ? 2 : 3)
                                << TEGRA_AES_SECURE_XOR_POS_SHIFT) |
                        (((mode & FLAGS_ENCRYPT) ? 2 : 3)
                                << TEGRA_AES_SECURE_VCTRAM_SEL_SHIFT) |
                        ((mode & FLAGS_ENCRYPT) ? 1 : 0)
                                << TEGRA_AES_SECURE_CORE_SEL_SHIFT);
        } else if (mode & FLAGS_OFB) {
                value |= ((TEGRA_AES_SECURE_XOR_POS_FIELD) |
                        (2 << TEGRA_AES_SECURE_INPUT_SEL_SHIFT) |
                        (TEGRA_AES_SECURE_CORE_SEL_FIELD));
        } else if (mode & FLAGS_RNG) {
                value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
                                << TEGRA_AES_SECURE_CORE_SEL_SHIFT |
                        TEGRA_AES_SECURE_RNG_ENB_FIELD);
        } else {
                value |= (((mode & FLAGS_ENCRYPT) ? 1 : 0)
                                << TEGRA_AES_SECURE_CORE_SEL_SHIFT);
        }
        dev_dbg(dd->dev, "secure_in_sel=0x%x", value);
        aes_writel(dd, value, TEGRA_AES_SECURE_INPUT_SELECT);

        aes_writel(dd, out_addr, TEGRA_AES_SECURE_DEST_ADDR);
        INIT_COMPLETION(dd->op_complete);

        for (i = 0; i < AES_HW_MAX_ICQ_LENGTH - 1; i++) {
                do {
                        value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
                        eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
                        icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
                } while (eng_busy && !icq_empty);
                aes_writel(dd, cmdq[i], TEGRA_AES_ICMDQUE_WR);
        }

        ret = wait_for_completion_timeout(&dd->op_complete,
                                          msecs_to_jiffies(150));
        if (ret == 0) {
                dev_err(dd->dev, "timed out (0x%x)\n",
                        aes_readl(dd, TEGRA_AES_INTR_STATUS));
                return -ETIMEDOUT;
        }

        aes_writel(dd, cmdq[AES_HW_MAX_ICQ_LENGTH - 1], TEGRA_AES_ICMDQUE_WR);
        return 0;
}
static void aes_release_key_slot(struct tegra_aes_slot *slot)
{
        if (slot->slot_num == SSK_SLOT_NUM)
                return;

        spin_lock(&list_lock);
        list_add_tail(&slot->node, &dev_list);
        spin_unlock(&list_lock);
}
static struct tegra_aes_slot *aes_find_key_slot(void)
{
        struct tegra_aes_slot *slot = NULL;
        struct list_head *new_head;
        int empty;

        spin_lock(&list_lock);
        empty = list_empty(&dev_list);
        if (!empty) {
                /* take the first free slot off the list */
                new_head = dev_list.next;
                slot = list_entry(new_head, struct tegra_aes_slot, node);
                list_del(new_head);
        }
        spin_unlock(&list_lock);

        return slot;
}
static int aes_set_key(struct tegra_aes_dev *dd)
{
        u32 value, cmdq[2];
        struct tegra_aes_ctx *ctx = dd->ctx;
        int eng_busy, icq_empty, dma_busy;
        bool use_ssk = false;

        /* use ssk? */
        if (!dd->ctx->slot) {
                dev_dbg(dd->dev, "using ssk");
                dd->ctx->slot = &ssk;
                use_ssk = true;
        }

        /* enable key schedule generation in hardware */
        value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG_EXT);
        value &= ~TEGRA_AES_SECURE_KEY_SCH_DIS_FIELD;
        aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG_EXT);

        /* select the key slot */
        value = aes_readl(dd, TEGRA_AES_SECURE_CONFIG);
        value &= ~TEGRA_AES_SECURE_KEY_INDEX_FIELD;
        value |= (ctx->slot->slot_num << TEGRA_AES_SECURE_KEY_INDEX_SHIFT);
        aes_writel(dd, value, TEGRA_AES_SECURE_CONFIG);

        if (use_ssk)
                return 0;

        /* copy the key table from sdram to vram */
        cmdq[0] = CMD_MEMDMAVD << CMDQ_OPCODE_SHIFT |
                MEMDMA_DIR_DTOVRAM << MEMDMA_DIR_SHIFT |
                AES_HW_KEY_TABLE_LENGTH_BYTES / sizeof(u32) <<
                        MEMDMA_NUM_WORDS_SHIFT;
        cmdq[1] = (u32)dd->ivkey_phys_base;

        aes_writel(dd, cmdq[0], TEGRA_AES_ICMDQUE_WR);
        aes_writel(dd, cmdq[1], TEGRA_AES_ICMDQUE_WR);
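        /* poll while the engine is busy, the ICQ has entries and DMA is active */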
        do {
                value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
                eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
                icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
                dma_busy = value & TEGRA_AES_DMA_BUSY_FIELD;
        } while (eng_busy && !icq_empty && dma_busy);

        /* settable command to get key into internal registers */
        value = CMD_SETTABLE << CMDQ_OPCODE_SHIFT |
                SUBCMD_CRYPTO_TABLE_SEL << CMDQ_TABLESEL_SHIFT |
                SUBCMD_VRAM_SEL << CMDQ_VRAMSEL_SHIFT |
                (SUBCMD_KEY_TABLE_SEL | ctx->slot->slot_num) <<
                        CMDQ_KEYTABLEID_SHIFT;
        aes_writel(dd, value, TEGRA_AES_ICMDQUE_WR);
        do {
                value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
                eng_busy = value & TEGRA_AES_ENGINE_BUSY_FIELD;
                icq_empty = value & TEGRA_AES_ICQ_EMPTY_FIELD;
        } while (eng_busy && !icq_empty);

        return 0;
}
static int tegra_aes_handle_req(struct tegra_aes_dev *dd)
{
        struct crypto_async_request *async_req, *backlog;
        struct crypto_ablkcipher *tfm;
        struct tegra_aes_ctx *ctx;
        struct tegra_aes_reqctx *rctx;
        struct ablkcipher_request *req;
        unsigned long flags;
        int dma_max = AES_HW_DMA_BUFFER_SIZE_BYTES;
        int ret = 0, nblocks, total;
        int count = 0;
        dma_addr_t addr_in, addr_out;
        struct scatterlist *in_sg, *out_sg;

        if (!dd)
                return -EINVAL;

        spin_lock_irqsave(&dd->lock, flags);
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (!async_req)
                clear_bit(FLAGS_BUSY, &dd->flags);
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return -ENODATA;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        dev_dbg(dd->dev, "%s: get new req\n", __func__);

        if (!req->src || !req->dst)
                return -EINVAL;

        /* take mutex to access the aes hw */
        mutex_lock(&aes_lock);

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        in_sg = dd->in_sg;
        out_sg = dd->out_sg;
        total = dd->total;

        tfm = crypto_ablkcipher_reqtfm(req);
        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(tfm);
        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        dd->iv = (u8 *)req->info;
        dd->ivlen = crypto_ablkcipher_ivsize(tfm);

        /* assign new context to device */
        ctx->dd = dd;
        dd->ctx = ctx;

        if (ctx->flags & FLAGS_NEW_KEY) {
                /* copy the key */
                memcpy(dd->ivkey_base, ctx->key, ctx->keylen);
                memset(dd->ivkey_base + ctx->keylen, 0,
                        AES_HW_KEY_TABLE_LENGTH_BYTES - ctx->keylen);
                aes_set_key(dd);
                ctx->flags &= ~FLAGS_NEW_KEY;
        }

        if (((dd->flags & FLAGS_CBC) || (dd->flags & FLAGS_OFB)) && dd->iv) {
                /*
                 * Set the IV in the AES hw slot.
                 * The hw generates an updated iv only after the iv has
                 * been set in the slot, so the key and iv are passed
                 * asynchronously.
                 */
                memcpy(dd->buf_in, dd->iv, dd->ivlen);

                ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
                                      (u32)dd->dma_buf_out, 1, FLAGS_CBC,
                                      false);
                if (ret < 0) {
                        dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                        goto out;
                }
        }
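        /*
         * Walk the scatterlists one entry per iteration, handing each
         * entry (capped at dma_max, i.e. 16KB) directly to the engine
         * for DMA.
         */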
        while (total) {
                dev_dbg(dd->dev, "remain: %d\n", total);

                ret = dma_map_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
                if (!ret) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        goto out;
                }

                ret = dma_map_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
                if (!ret) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                        goto out;
                }

                addr_in = sg_dma_address(in_sg);
                addr_out = sg_dma_address(out_sg);
                dd->flags |= FLAGS_FAST;
                count = min_t(int, sg_dma_len(in_sg), dma_max);
                WARN_ON(sg_dma_len(in_sg) != sg_dma_len(out_sg));
                nblocks = DIV_ROUND_UP(count, AES_BLOCK_SIZE);

                ret = aes_start_crypt(dd, addr_in, addr_out, nblocks,
                                      dd->flags, true);

                dma_unmap_sg(dd->dev, out_sg, 1, DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, in_sg, 1, DMA_TO_DEVICE);
                if (ret < 0) {
                        dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                        goto out;
                }
                dd->flags &= ~FLAGS_FAST;

                dev_dbg(dd->dev, "out: copied %d\n", count);
                total -= count;
                in_sg = sg_next(in_sg);
                out_sg = sg_next(out_sg);
                WARN_ON(((total != 0) && (!in_sg || !out_sg)));
        }

out:
        mutex_unlock(&aes_lock);
        dd->total = total;

        if (dd->req->base.complete)
                dd->req->base.complete(&dd->req->base, ret);

        dev_dbg(dd->dev, "%s: exit\n", __func__);
        return ret;
}
static int tegra_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct tegra_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        struct tegra_aes_dev *dd = aes_dev;
        struct tegra_aes_slot *key_slot;

        if ((keylen != AES_KEYSIZE_128) && (keylen != AES_KEYSIZE_192) &&
            (keylen != AES_KEYSIZE_256)) {
                dev_err(dd->dev, "unsupported key size\n");
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        dev_dbg(dd->dev, "keylen: %d\n", keylen);

        ctx->dd = dd;

        if (key) {
                if (!ctx->slot) {
                        key_slot = aes_find_key_slot();
                        if (!key_slot) {
                                dev_err(dd->dev, "no empty slot\n");
                                return -ENOMEM;
                        }
                        ctx->slot = key_slot;
                }

                memcpy(ctx->key, key, keylen);
                ctx->keylen = keylen;
        }

        ctx->flags |= FLAGS_NEW_KEY;
        dev_dbg(dd->dev, "done\n");
        return 0;
}
static void aes_workqueue_handler(struct work_struct *work)
{
        struct tegra_aes_dev *dd = aes_dev;
        int ret;

        ret = clk_enable(dd->aes_clk);
        if (ret)
                BUG(); /* cannot make progress without the engine clock */

        /* empty the crypto queue and then return */
        do {
                ret = tegra_aes_handle_req(dd);
        } while (!ret);

        clk_disable(dd->aes_clk);
}
static irqreturn_t aes_irq(int irq, void *dev_id)
{
        struct tegra_aes_dev *dd = (struct tegra_aes_dev *)dev_id;
        u32 value = aes_readl(dd, TEGRA_AES_INTR_STATUS);
        int busy = test_bit(FLAGS_BUSY, &dd->flags);

        if (!busy) {
                dev_dbg(dd->dev, "spurious interrupt\n");
                return IRQ_NONE;
        }

        dev_dbg(dd->dev, "irq_stat: 0x%x\n", value);
        if (value & TEGRA_AES_INT_ERROR_MASK)
                aes_writel(dd, TEGRA_AES_INT_ERROR_MASK, TEGRA_AES_INTR_STATUS);

        if (!(value & TEGRA_AES_ENGINE_BUSY_FIELD))
                complete(&dd->op_complete);
        else
                return IRQ_NONE;

        return IRQ_HANDLED;
}
static int tegra_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct tegra_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct tegra_aes_dev *dd = aes_dev;
        unsigned long flags;
        int err = 0;
        int busy;

        dev_dbg(dd->dev, "nbytes: %d, enc: %d, cbc: %d, ofb: %d\n",
                req->nbytes, !!(mode & FLAGS_ENCRYPT),
                !!(mode & FLAGS_CBC), !!(mode & FLAGS_OFB));

        rctx->mode = mode;

        spin_lock_irqsave(&dd->lock, flags);
        err = ablkcipher_enqueue_request(&dd->queue, req);
        busy = test_and_set_bit(FLAGS_BUSY, &dd->flags);
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!busy)
                queue_work(aes_wq, &aes_work);

        return err;
}
static int tegra_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return tegra_aes_crypt(req, FLAGS_ENCRYPT);
}

static int tegra_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return tegra_aes_crypt(req, 0);
}

static int tegra_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int tegra_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return tegra_aes_crypt(req, FLAGS_CBC);
}

static int tegra_aes_ofb_encrypt(struct ablkcipher_request *req)
{
        return tegra_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_OFB);
}

static int tegra_aes_ofb_decrypt(struct ablkcipher_request *req)
{
        return tegra_aes_crypt(req, FLAGS_OFB);
}
static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
                                unsigned int dlen)
{
        struct tegra_aes_dev *dd = aes_dev;
        struct tegra_aes_ctx *ctx = &rng_ctx;
        int ret, i;
        u8 *dest = rdata, *dt = dd->dt;

        /* take mutex to access the aes hw */
        mutex_lock(&aes_lock);

        ret = clk_enable(dd->aes_clk);
        if (ret) {
                mutex_unlock(&aes_lock);
                return ret;
        }

        ctx->dd = dd;
        dd->ctx = ctx;
        dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
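        /*
         * Produce one random block: run the current DT through the
         * engine with its RNG path enabled (FLAGS_RNG).
         */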
        memcpy(dd->buf_in, dt, DEFAULT_RNG_BLK_SZ);

        ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
                              (u32)dd->dma_buf_out, 1, dd->flags, true);
        if (ret < 0) {
                dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                dlen = ret;
                goto out;
        }
        memcpy(dest, dd->buf_out, dlen);

        /* update the DT: increment it as a big-endian counter */
        for (i = DEFAULT_RNG_BLK_SZ - 1; i >= 0; i--) {
                dt[i] += 1;
                if (dt[i] != 0)
                        break;
        }

out:
        clk_disable(dd->aes_clk);
        mutex_unlock(&aes_lock);

        dev_dbg(dd->dev, "%s: done\n", __func__);
        return dlen;
}
static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
                               unsigned int slen)
{
        struct tegra_aes_dev *dd = aes_dev;
        struct tegra_aes_ctx *ctx = &rng_ctx;
        struct tegra_aes_slot *key_slot;
        struct timespec ts;
        int ret = 0;
        u64 nsec, tmp[2];
        u8 *dt;

        if (!ctx || !dd) {
                pr_err("%s: ctx=%p, dd=%p\n", __func__, ctx, dd);
                return -EINVAL;
        }

        if (slen < (DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
                dev_err(dd->dev, "seed size invalid\n");
                return -ENOMEM;
        }

        /* take mutex to access the aes hw */
        mutex_lock(&aes_lock);

        if (!ctx->slot) {
                key_slot = aes_find_key_slot();
                if (!key_slot) {
                        dev_err(dd->dev, "no empty slot\n");
                        mutex_unlock(&aes_lock);
                        return -ENOMEM;
                }
                ctx->slot = key_slot;
        }

        ctx->dd = dd;
        dd->ctx = ctx;
        dd->ctr = 0;

        ctx->keylen = AES_KEYSIZE_128;
        ctx->flags |= FLAGS_NEW_KEY;

        /* copy the key to the key slot */
        memcpy(dd->ivkey_base, seed + DEFAULT_RNG_BLK_SZ, AES_KEYSIZE_128);
        memset(dd->ivkey_base + AES_KEYSIZE_128, 0,
                AES_HW_KEY_TABLE_LENGTH_BYTES - AES_KEYSIZE_128);

        dd->iv = seed;
        dd->ivlen = slen;

        dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;

        ret = clk_enable(dd->aes_clk);
        if (ret) {
                mutex_unlock(&aes_lock);
                return ret;
        }

        aes_set_key(dd);

        /* set seed to the aes hw slot */
        memcpy(dd->buf_in, dd->iv, DEFAULT_RNG_BLK_SZ);
        ret = aes_start_crypt(dd, (u32)dd->dma_buf_in,
                              (u32)dd->dma_buf_out, 1, FLAGS_CBC, false);
        if (ret < 0) {
                dev_err(dd->dev, "aes_start_crypt fail(%d)\n", ret);
                goto out;
        }
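        /*
         * Derive the initial DT: take it from the seed when the caller
         * supplied one (seed layout: IV || key || DT), otherwise
         * synthesize it from a microsecond timestamp, a per-device
         * counter and the chip's unique ID.
         */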
        if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
                dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
        } else {
                getnstimeofday(&ts);
                nsec = timespec_to_ns(&ts);
                do_div(nsec, 1000);
                nsec ^= dd->ctr << 56;
                dd->ctr++;
                tmp[0] = nsec;
                tmp[1] = tegra_chip_uid();
                dt = (u8 *)tmp;
        }
        memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);

out:
        clk_disable(dd->aes_clk);
        mutex_unlock(&aes_lock);

        dev_dbg(dd->dev, "%s: done\n", __func__);
        return ret;
}
static int tegra_aes_cra_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct tegra_aes_reqctx);

        return 0;
}

static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
{
        struct tegra_aes_ctx *ctx =
                crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);

        if (ctx && ctx->slot)
                aes_release_key_slot(ctx->slot);
}
static struct crypto_alg algs[] = {
        {
                .cra_name = "ecb(aes)",
                .cra_driver_name = "ecb-aes-tegra",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_alignmask = 3,
                .cra_type = &crypto_ablkcipher_type,
                .cra_u.ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = tegra_aes_setkey,
                        .encrypt = tegra_aes_ecb_encrypt,
                        .decrypt = tegra_aes_ecb_decrypt,
                },
        }, {
                .cra_name = "cbc(aes)",
                .cra_driver_name = "cbc-aes-tegra",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_alignmask = 3,
                .cra_type = &crypto_ablkcipher_type,
                .cra_u.ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = tegra_aes_setkey,
                        .encrypt = tegra_aes_cbc_encrypt,
                        .decrypt = tegra_aes_cbc_decrypt,
                }
        }, {
                .cra_name = "ofb(aes)",
                .cra_driver_name = "ofb-aes-tegra",
                .cra_priority = 300,
                .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
                .cra_blocksize = AES_BLOCK_SIZE,
                .cra_alignmask = 3,
                .cra_type = &crypto_ablkcipher_type,
                .cra_u.ablkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = tegra_aes_setkey,
                        .encrypt = tegra_aes_ofb_encrypt,
                        .decrypt = tegra_aes_ofb_decrypt,
                }
        }, {
                .cra_name = "ansi_cprng",
                .cra_driver_name = "rng-aes-tegra",
                .cra_flags = CRYPTO_ALG_TYPE_RNG,
                .cra_ctxsize = sizeof(struct tegra_aes_ctx),
                .cra_type = &crypto_rng_type,
                .cra_u.rng = {
                        .rng_make_random = tegra_aes_get_random,
                        .rng_reset = tegra_aes_rng_reset,
                        .seedsize = AES_KEYSIZE_128 + (2 * DEFAULT_RNG_BLK_SZ),
                }
        }
};
static int tegra_aes_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct tegra_aes_dev *dd;
        struct resource *res;
        int err = -ENOMEM, i = 0, j;

        dd = devm_kzalloc(dev, sizeof(struct tegra_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                return err;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        dd->slots = devm_kzalloc(dev, sizeof(struct tegra_aes_slot) *
                                 AES_NR_KEYSLOTS, GFP_KERNEL);
        if (dd->slots == NULL) {
                dev_err(dev, "unable to alloc slot struct.\n");
                goto out;
        }

        spin_lock_init(&dd->lock);
        crypto_init_queue(&dd->queue, TEGRA_AES_QUEUE_LENGTH);

        /* Get the module base address */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "invalid resource type: base\n");
                err = -ENODEV;
                goto out;
        }

        if (!devm_request_mem_region(&pdev->dev, res->start,
                                     resource_size(res),
                                     dev_name(&pdev->dev))) {
                dev_err(&pdev->dev, "Couldn't request MEM resource\n");
                return -ENODEV;
        }

        dd->io_base = devm_ioremap(dev, res->start, resource_size(res));
        if (!dd->io_base) {
                dev_err(dev, "can't ioremap register space\n");
                err = -ENOMEM;
                goto out;
        }

        /* Initialize the vde clock */
        dd->aes_clk = clk_get(dev, "vde");
        if (IS_ERR(dd->aes_clk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = -ENODEV;
                goto out;
        }

        err = clk_set_rate(dd->aes_clk, ULONG_MAX);
        if (err) {
                dev_err(dd->dev, "clk_set_rate failed(%d)\n", err);
                goto out;
        }
        /*
         * The following contiguous memory is allocated:
         * - hardware key table
         * - key schedule
         */
        dd->ivkey_base = dma_alloc_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
                                            &dd->ivkey_phys_base,
                                            GFP_KERNEL);
        if (!dd->ivkey_base) {
                dev_err(dev, "can not allocate iv/key buffer\n");
                err = -ENOMEM;
                goto out;
        }

        dd->buf_in = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                                        &dd->dma_buf_in, GFP_KERNEL);
        if (!dd->buf_in) {
                dev_err(dev, "can not allocate dma-in buffer\n");
                err = -ENOMEM;
                goto out;
        }

        dd->buf_out = dma_alloc_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                                         &dd->dma_buf_out, GFP_KERNEL);
        if (!dd->buf_out) {
                dev_err(dev, "can not allocate dma-out buffer\n");
                err = -ENOMEM;
                goto out;
        }

        init_completion(&dd->op_complete);
        aes_wq = alloc_workqueue("tegra_aes_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
        if (!aes_wq) {
                dev_err(dev, "alloc_workqueue failed\n");
                goto out;
        }

        /* get the irq */
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(dev, "invalid resource type: irq\n");
                err = -ENODEV;
                goto out;
        }
        dd->irq = res->start;
        err = devm_request_irq(dev, dd->irq, aes_irq, IRQF_TRIGGER_HIGH |
                               IRQF_SHARED, "tegra-aes", dd);
        if (err) {
                dev_err(dev, "request_irq failed\n");
                goto out;
        }
        mutex_init(&aes_lock);
        INIT_LIST_HEAD(&dev_list);

        spin_lock_init(&list_lock);
        spin_lock(&list_lock);
        for (i = 0; i < AES_NR_KEYSLOTS; i++) {
                if (i == SSK_SLOT_NUM)
                        continue;
                dd->slots[i].slot_num = i;
                INIT_LIST_HEAD(&dd->slots[i].node);
                list_add_tail(&dd->slots[i].node, &dev_list);
        }
        spin_unlock(&list_lock);

        aes_dev = dd;

        for (i = 0; i < ARRAY_SIZE(algs); i++) {
                INIT_LIST_HEAD(&algs[i].cra_list);

                algs[i].cra_priority = 300;
                algs[i].cra_ctxsize = sizeof(struct tegra_aes_ctx);
                algs[i].cra_module = THIS_MODULE;
                algs[i].cra_init = tegra_aes_cra_init;
                algs[i].cra_exit = tegra_aes_cra_exit;

                err = crypto_register_alg(&algs[i]);
                if (err)
                        goto out;
        }

        dev_info(dev, "registered");
        return 0;

out:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);
        if (dd->ivkey_base)
                dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
                                  dd->ivkey_base, dd->ivkey_phys_base);
        if (dd->buf_in)
                dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                                  dd->buf_in, dd->dma_buf_in);
        if (dd->buf_out)
                dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                                  dd->buf_out, dd->dma_buf_out);
        if (!IS_ERR_OR_NULL(dd->aes_clk))
                clk_put(dd->aes_clk);
        if (aes_wq)
                destroy_workqueue(aes_wq);
        spin_lock(&list_lock);
        list_del(&dev_list);
        spin_unlock(&list_lock);

        aes_dev = NULL;

        dev_err(dev, "%s: initialization failed.\n", __func__);
        return err;
}
static int __devexit tegra_aes_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct tegra_aes_dev *dd = platform_get_drvdata(pdev);
        int i;

        for (i = 0; i < ARRAY_SIZE(algs); i++)
                crypto_unregister_alg(&algs[i]);

        cancel_work_sync(&aes_work);
        destroy_workqueue(aes_wq);
        spin_lock(&list_lock);
        list_del(&dev_list);
        spin_unlock(&list_lock);

        dma_free_coherent(dev, AES_HW_KEY_TABLE_LENGTH_BYTES,
                          dd->ivkey_base, dd->ivkey_phys_base);
        dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                          dd->buf_in, dd->dma_buf_in);
        dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                          dd->buf_out, dd->dma_buf_out);
        clk_put(dd->aes_clk);
        aes_dev = NULL;

        return 0;
}
static struct of_device_id tegra_aes_of_match[] __devinitdata = {
        { .compatible = "nvidia,tegra20-aes", },
        { .compatible = "nvidia,tegra30-aes", },
        { },
};

static struct platform_driver tegra_aes_driver = {
        .probe  = tegra_aes_probe,
        .remove = __devexit_p(tegra_aes_remove),
        .driver = {
                .name  = "tegra-aes",
                .owner = THIS_MODULE,
                .of_match_table = tegra_aes_of_match,
        },
};

module_platform_driver(tegra_aes_driver);

MODULE_DESCRIPTION("Tegra AES/OFB/CPRNG hw acceleration support.");
MODULE_AUTHOR("NVIDIA Corporation");
MODULE_LICENSE("GPL v2");