/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256-bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"
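
/*
 * Optimized PIO path. It assumes that every scatterlist entry of both the
 * source and the destination has a length that is a multiple of 4, so the
 * whole request can be processed in 32-bit words through the RX/TX FIFOs
 * without any intermediate buffering.
 */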
static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offsets for in and out */
	unsigned long flags;

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->nbytes / 4;
	oleft = areq->nbytes / 4;
	oi = 0;
	oo = 0;
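
	/*
	 * Main PIO loop: push up to rx_cnt 32-bit words from the current
	 * source SG into the RX FIFO, read SS_FCSR to refresh the free RX
	 * space and the number of words ready in the TX FIFO, then drain up
	 * to tx_cnt words into the current destination SG. Loop until all
	 * output words have been read back.
	 */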
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo > 0) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo > 0) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft > 0);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

/* Generic function that supports SGs with a size not multiple of 4 */
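/*
 * Input bytes that do not fill a complete 32-bit word are staged in a small
 * linearization buffer (buf) until a full word can be written to the RX
 * FIFO; likewise, output words are drained from the TX FIFO into bufo and
 * then copied byte by byte into the destination scatterlist.
 */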
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offsets for in and out */
	char buf[4 * SS_RX_MAX]; /* buffer for linearizing SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearizing SG dst */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If we only have SGs with a size multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length % 4) != 0)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length % 4) != 0)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->nbytes;
	oleft = areq->nbytes;
	oi = 0;
	oo = 0;
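
	/*
	 * Main loop of the generic path: on each pass, feed the RX FIFO
	 * either directly from the source SG (when whole words are
	 * available) or through the buf staging buffer, then read SS_FCSR
	 * and drain the TX FIFO either directly into the destination SG or
	 * through bufo when the destination chunk is not word-aligned.
	 */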
	while (oleft > 0) {
		if (ileft > 0) {
			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG.
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo > 0 && ob == 0) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so we need
				 * to linearize them in buf. Here todo is in
				 * bytes. After the copy, once we have a
				 * multiple of 4 bytes, we must be able to
				 * write all of buf in one pass, which is why
				 * we min() with rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (ob % 4 == 0) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->nbytes, rx_cnt,
			oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);

		if (tx_cnt == 0)
			continue;

		/* todo in 4-byte words */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo > 0) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; we read as much as
			 * possible in order to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size and
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
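
/*
 * The wrappers below only build the SS_CTL mode word for the request
 * (algorithm, chaining mode, direction and key size) in the request
 * context, then delegate the actual transfer to sun4i_ss_cipher_poll().
 */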

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
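
/*
 * Per-transform initialization: look up the sun4i_ss_alg_template that
 * embeds this crypto_alg so that the transform context gets a pointer to
 * the SS device, and reserve room for a sun4i_cipher_req_ctx in each
 * request.
 */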
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	op->ss = algt->ss;

	tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);

	return 0;
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	flags = crypto_ablkcipher_get_flags(tfm);
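	/*
	 * des_ekey() returns 0 when the key is a weak DES key; reject it
	 * only if the caller asked for weak-key checking via
	 * CRYPTO_TFM_REQ_WEAK_KEY.
	 */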
	ret = des_ekey(tmp, key);
	if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	return 0;
}