caamalg_qi.c 65 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428
  1. /*
  2. * Freescale FSL CAAM support for crypto API over QI backend.
  3. * Based on caamalg.c
  4. *
  5. * Copyright 2013-2016 Freescale Semiconductor, Inc.
  6. * Copyright 2016-2017 NXP
  7. */
  8. #include "compat.h"
  9. #include "regs.h"
  10. #include "intern.h"
  11. #include "desc_constr.h"
  12. #include "error.h"
  13. #include "sg_sw_qm.h"
  14. #include "key_gen.h"
  15. #include "qi.h"
  16. #include "jr.h"
  17. #include "caamalg_desc.h"
  18. /*
  19. * crypto alg
  20. */
  21. #define CAAM_CRA_PRIORITY 2000
  22. /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
  23. #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
  24. SHA512_DIGEST_SIZE * 2)
  25. #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \
  26. CAAM_MAX_KEY_SIZE)
  27. #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
  28. struct caam_alg_entry {
  29. int class1_alg_type;
  30. int class2_alg_type;
  31. bool rfc3686;
  32. bool geniv;
  33. };
  34. struct caam_aead_alg {
  35. struct aead_alg aead;
  36. struct caam_alg_entry caam;
  37. bool registered;
  38. };
  39. /*
  40. * per-session context
  41. */
  42. struct caam_ctx {
  43. struct device *jrdev;
  44. u32 sh_desc_enc[DESC_MAX_USED_LEN];
  45. u32 sh_desc_dec[DESC_MAX_USED_LEN];
  46. u32 sh_desc_givenc[DESC_MAX_USED_LEN];
  47. u8 key[CAAM_MAX_KEY_SIZE];
  48. dma_addr_t key_dma;
  49. struct alginfo adata;
  50. struct alginfo cdata;
  51. unsigned int authsize;
  52. struct device *qidev;
  53. spinlock_t lock; /* Protects multiple init of driver context */
  54. struct caam_drv_ctx *drv_ctx[NUM_OP];
  55. };
  56. static int aead_set_sh_desc(struct crypto_aead *aead)
  57. {
  58. struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  59. typeof(*alg), aead);
  60. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  61. unsigned int ivsize = crypto_aead_ivsize(aead);
  62. u32 ctx1_iv_off = 0;
  63. u32 *nonce = NULL;
  64. unsigned int data_len[2];
  65. u32 inl_mask;
  66. const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  67. OP_ALG_AAI_CTR_MOD128);
  68. const bool is_rfc3686 = alg->caam.rfc3686;
  69. if (!ctx->cdata.keylen || !ctx->authsize)
  70. return 0;
  71. /*
  72. * AES-CTR needs to load IV in CONTEXT1 reg
  73. * at an offset of 128bits (16bytes)
  74. * CONTEXT1[255:128] = IV
  75. */
  76. if (ctr_mode)
  77. ctx1_iv_off = 16;
  78. /*
  79. * RFC3686 specific:
  80. * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  81. */
  82. if (is_rfc3686) {
  83. ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  84. nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
  85. ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
  86. }
  87. data_len[0] = ctx->adata.keylen_pad;
  88. data_len[1] = ctx->cdata.keylen;
  89. if (alg->caam.geniv)
  90. goto skip_enc;
  91. /* aead_encrypt shared descriptor */
  92. if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
  93. (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  94. DESC_JOB_IO_LEN, data_len, &inl_mask,
  95. ARRAY_SIZE(data_len)) < 0)
  96. return -EINVAL;
  97. if (inl_mask & 1)
  98. ctx->adata.key_virt = ctx->key;
  99. else
  100. ctx->adata.key_dma = ctx->key_dma;
  101. if (inl_mask & 2)
  102. ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  103. else
  104. ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  105. ctx->adata.key_inline = !!(inl_mask & 1);
  106. ctx->cdata.key_inline = !!(inl_mask & 2);
  107. cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
  108. ivsize, ctx->authsize, is_rfc3686, nonce,
  109. ctx1_iv_off, true);
  110. skip_enc:
  111. /* aead_decrypt shared descriptor */
  112. if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
  113. (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  114. DESC_JOB_IO_LEN, data_len, &inl_mask,
  115. ARRAY_SIZE(data_len)) < 0)
  116. return -EINVAL;
  117. if (inl_mask & 1)
  118. ctx->adata.key_virt = ctx->key;
  119. else
  120. ctx->adata.key_dma = ctx->key_dma;
  121. if (inl_mask & 2)
  122. ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  123. else
  124. ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  125. ctx->adata.key_inline = !!(inl_mask & 1);
  126. ctx->cdata.key_inline = !!(inl_mask & 2);
  127. cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
  128. ivsize, ctx->authsize, alg->caam.geniv,
  129. is_rfc3686, nonce, ctx1_iv_off, true);
  130. if (!alg->caam.geniv)
  131. goto skip_givenc;
  132. /* aead_givencrypt shared descriptor */
  133. if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
  134. (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
  135. DESC_JOB_IO_LEN, data_len, &inl_mask,
  136. ARRAY_SIZE(data_len)) < 0)
  137. return -EINVAL;
  138. if (inl_mask & 1)
  139. ctx->adata.key_virt = ctx->key;
  140. else
  141. ctx->adata.key_dma = ctx->key_dma;
  142. if (inl_mask & 2)
  143. ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
  144. else
  145. ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
  146. ctx->adata.key_inline = !!(inl_mask & 1);
  147. ctx->cdata.key_inline = !!(inl_mask & 2);
  148. cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
  149. ivsize, ctx->authsize, is_rfc3686, nonce,
  150. ctx1_iv_off, true);
  151. skip_givenc:
  152. return 0;
  153. }
  154. static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
  155. {
  156. struct caam_ctx *ctx = crypto_aead_ctx(authenc);
  157. ctx->authsize = authsize;
  158. aead_set_sh_desc(authenc);
  159. return 0;
  160. }
  161. static int aead_setkey(struct crypto_aead *aead, const u8 *key,
  162. unsigned int keylen)
  163. {
  164. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  165. struct device *jrdev = ctx->jrdev;
  166. struct crypto_authenc_keys keys;
  167. int ret = 0;
  168. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
  169. goto badkey;
  170. #ifdef DEBUG
  171. dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
  172. keys.authkeylen + keys.enckeylen, keys.enckeylen,
  173. keys.authkeylen);
  174. print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  175. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  176. #endif
  177. ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
  178. keys.authkeylen, CAAM_MAX_KEY_SIZE -
  179. keys.enckeylen);
  180. if (ret)
  181. goto badkey;
  182. /* postpend encryption key to auth split key */
  183. memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
  184. dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
  185. keys.enckeylen, DMA_TO_DEVICE);
  186. #ifdef DEBUG
  187. print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
  188. DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
  189. ctx->adata.keylen_pad + keys.enckeylen, 1);
  190. #endif
  191. ctx->cdata.keylen = keys.enckeylen;
  192. ret = aead_set_sh_desc(aead);
  193. if (ret)
  194. goto badkey;
  195. /* Now update the driver contexts with the new shared descriptor */
  196. if (ctx->drv_ctx[ENCRYPT]) {
  197. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  198. ctx->sh_desc_enc);
  199. if (ret) {
  200. dev_err(jrdev, "driver enc context update failed\n");
  201. goto badkey;
  202. }
  203. }
  204. if (ctx->drv_ctx[DECRYPT]) {
  205. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  206. ctx->sh_desc_dec);
  207. if (ret) {
  208. dev_err(jrdev, "driver dec context update failed\n");
  209. goto badkey;
  210. }
  211. }
  212. return ret;
  213. badkey:
  214. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  215. return -EINVAL;
  216. }
  217. static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  218. const u8 *key, unsigned int keylen)
  219. {
  220. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  221. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
  222. const char *alg_name = crypto_tfm_alg_name(tfm);
  223. struct device *jrdev = ctx->jrdev;
  224. unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  225. u32 ctx1_iv_off = 0;
  226. const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
  227. OP_ALG_AAI_CTR_MOD128);
  228. const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
  229. int ret = 0;
  230. memcpy(ctx->key, key, keylen);
  231. #ifdef DEBUG
  232. print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
  233. DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
  234. #endif
  235. /*
  236. * AES-CTR needs to load IV in CONTEXT1 reg
  237. * at an offset of 128bits (16bytes)
  238. * CONTEXT1[255:128] = IV
  239. */
  240. if (ctr_mode)
  241. ctx1_iv_off = 16;
  242. /*
  243. * RFC3686 specific:
  244. * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
  245. * | *key = {KEY, NONCE}
  246. */
  247. if (is_rfc3686) {
  248. ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
  249. keylen -= CTR_RFC3686_NONCE_SIZE;
  250. }
  251. dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
  252. ctx->cdata.keylen = keylen;
  253. ctx->cdata.key_virt = ctx->key;
  254. ctx->cdata.key_inline = true;
  255. /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
  256. cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
  257. is_rfc3686, ctx1_iv_off);
  258. cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
  259. is_rfc3686, ctx1_iv_off);
  260. cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
  261. ivsize, is_rfc3686, ctx1_iv_off);
  262. /* Now update the driver contexts with the new shared descriptor */
  263. if (ctx->drv_ctx[ENCRYPT]) {
  264. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  265. ctx->sh_desc_enc);
  266. if (ret) {
  267. dev_err(jrdev, "driver enc context update failed\n");
  268. goto badkey;
  269. }
  270. }
  271. if (ctx->drv_ctx[DECRYPT]) {
  272. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  273. ctx->sh_desc_dec);
  274. if (ret) {
  275. dev_err(jrdev, "driver dec context update failed\n");
  276. goto badkey;
  277. }
  278. }
  279. if (ctx->drv_ctx[GIVENCRYPT]) {
  280. ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
  281. ctx->sh_desc_givenc);
  282. if (ret) {
  283. dev_err(jrdev, "driver givenc context update failed\n");
  284. goto badkey;
  285. }
  286. }
  287. return ret;
  288. badkey:
  289. crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  290. return -EINVAL;
  291. }
  292. static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  293. const u8 *key, unsigned int keylen)
  294. {
  295. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  296. struct device *jrdev = ctx->jrdev;
  297. int ret = 0;
  298. if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
  299. dev_err(jrdev, "key size mismatch\n");
  300. goto badkey;
  301. }
  302. memcpy(ctx->key, key, keylen);
  303. dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
  304. ctx->cdata.keylen = keylen;
  305. ctx->cdata.key_virt = ctx->key;
  306. ctx->cdata.key_inline = true;
  307. /* xts ablkcipher encrypt, decrypt shared descriptors */
  308. cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
  309. cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
  310. /* Now update the driver contexts with the new shared descriptor */
  311. if (ctx->drv_ctx[ENCRYPT]) {
  312. ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
  313. ctx->sh_desc_enc);
  314. if (ret) {
  315. dev_err(jrdev, "driver enc context update failed\n");
  316. goto badkey;
  317. }
  318. }
  319. if (ctx->drv_ctx[DECRYPT]) {
  320. ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
  321. ctx->sh_desc_dec);
  322. if (ret) {
  323. dev_err(jrdev, "driver dec context update failed\n");
  324. goto badkey;
  325. }
  326. }
  327. return ret;
  328. badkey:
  329. crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  330. return -EINVAL;
  331. }
  332. /*
  333. * aead_edesc - s/w-extended aead descriptor
  334. * @src_nents: number of segments in input scatterlist
  335. * @dst_nents: number of segments in output scatterlist
  336. * @iv_dma: dma address of iv for checking continuity and link table
  337. * @qm_sg_bytes: length of dma mapped h/w link table
  338. * @qm_sg_dma: bus physical mapped address of h/w link table
  339. * @assoclen: associated data length, in CAAM endianness
  340. * @assoclen_dma: bus physical mapped address of req->assoclen
  341. * @drv_req: driver-specific request structure
  342. * @sgt: the h/w link table, followed by IV
  343. */
  344. struct aead_edesc {
  345. int src_nents;
  346. int dst_nents;
  347. dma_addr_t iv_dma;
  348. int qm_sg_bytes;
  349. dma_addr_t qm_sg_dma;
  350. unsigned int assoclen;
  351. dma_addr_t assoclen_dma;
  352. struct caam_drv_req drv_req;
  353. struct qm_sg_entry sgt[0];
  354. };
  355. /*
  356. * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  357. * @src_nents: number of segments in input scatterlist
  358. * @dst_nents: number of segments in output scatterlist
  359. * @iv_dma: dma address of iv for checking continuity and link table
  360. * @qm_sg_bytes: length of dma mapped h/w link table
  361. * @qm_sg_dma: bus physical mapped address of h/w link table
  362. * @drv_req: driver-specific request structure
  363. * @sgt: the h/w link table, followed by IV
  364. */
  365. struct ablkcipher_edesc {
  366. int src_nents;
  367. int dst_nents;
  368. dma_addr_t iv_dma;
  369. int qm_sg_bytes;
  370. dma_addr_t qm_sg_dma;
  371. struct caam_drv_req drv_req;
  372. struct qm_sg_entry sgt[0];
  373. };
  374. static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
  375. enum optype type)
  376. {
  377. /*
  378. * This function is called on the fast path with values of 'type'
  379. * known at compile time. Invalid arguments are not expected and
  380. * thus no checks are made.
  381. */
  382. struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
  383. u32 *desc;
  384. if (unlikely(!drv_ctx)) {
  385. spin_lock(&ctx->lock);
  386. /* Read again to check if some other core init drv_ctx */
  387. drv_ctx = ctx->drv_ctx[type];
  388. if (!drv_ctx) {
  389. int cpu;
  390. if (type == ENCRYPT)
  391. desc = ctx->sh_desc_enc;
  392. else if (type == DECRYPT)
  393. desc = ctx->sh_desc_dec;
  394. else /* (type == GIVENCRYPT) */
  395. desc = ctx->sh_desc_givenc;
  396. cpu = smp_processor_id();
  397. drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
  398. if (likely(!IS_ERR_OR_NULL(drv_ctx)))
  399. drv_ctx->op_type = type;
  400. ctx->drv_ctx[type] = drv_ctx;
  401. }
  402. spin_unlock(&ctx->lock);
  403. }
  404. return drv_ctx;
  405. }
  406. static void caam_unmap(struct device *dev, struct scatterlist *src,
  407. struct scatterlist *dst, int src_nents,
  408. int dst_nents, dma_addr_t iv_dma, int ivsize,
  409. enum optype op_type, dma_addr_t qm_sg_dma,
  410. int qm_sg_bytes)
  411. {
  412. if (dst != src) {
  413. if (src_nents)
  414. dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
  415. dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
  416. } else {
  417. dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
  418. }
  419. if (iv_dma)
  420. dma_unmap_single(dev, iv_dma, ivsize,
  421. op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
  422. DMA_TO_DEVICE);
  423. if (qm_sg_bytes)
  424. dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
  425. }
  426. static void aead_unmap(struct device *dev,
  427. struct aead_edesc *edesc,
  428. struct aead_request *req)
  429. {
  430. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  431. int ivsize = crypto_aead_ivsize(aead);
  432. caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
  433. edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
  434. edesc->qm_sg_dma, edesc->qm_sg_bytes);
  435. dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
  436. }
  437. static void ablkcipher_unmap(struct device *dev,
  438. struct ablkcipher_edesc *edesc,
  439. struct ablkcipher_request *req)
  440. {
  441. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  442. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  443. caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
  444. edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
  445. edesc->qm_sg_dma, edesc->qm_sg_bytes);
  446. }
  447. static void aead_done(struct caam_drv_req *drv_req, u32 status)
  448. {
  449. struct device *qidev;
  450. struct aead_edesc *edesc;
  451. struct aead_request *aead_req = drv_req->app_ctx;
  452. struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
  453. struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
  454. int ecode = 0;
  455. qidev = caam_ctx->qidev;
  456. if (unlikely(status)) {
  457. caam_jr_strstatus(qidev, status);
  458. ecode = -EIO;
  459. }
  460. edesc = container_of(drv_req, typeof(*edesc), drv_req);
  461. aead_unmap(qidev, edesc, aead_req);
  462. aead_request_complete(aead_req, ecode);
  463. qi_cache_free(edesc);
  464. }
  465. /*
  466. * allocate and map the aead extended descriptor
  467. */
  468. static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
  469. bool encrypt)
  470. {
  471. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  472. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  473. struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
  474. typeof(*alg), aead);
  475. struct device *qidev = ctx->qidev;
  476. gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  477. GFP_KERNEL : GFP_ATOMIC;
  478. int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  479. struct aead_edesc *edesc;
  480. dma_addr_t qm_sg_dma, iv_dma = 0;
  481. int ivsize = 0;
  482. unsigned int authsize = ctx->authsize;
  483. int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
  484. int in_len, out_len;
  485. struct qm_sg_entry *sg_table, *fd_sgt;
  486. struct caam_drv_ctx *drv_ctx;
  487. enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
  488. drv_ctx = get_drv_ctx(ctx, op_type);
  489. if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
  490. return (struct aead_edesc *)drv_ctx;
  491. /* allocate space for base edesc and hw desc commands, link tables */
  492. edesc = qi_cache_alloc(GFP_DMA | flags);
  493. if (unlikely(!edesc)) {
  494. dev_err(qidev, "could not allocate extended descriptor\n");
  495. return ERR_PTR(-ENOMEM);
  496. }
  497. if (likely(req->src == req->dst)) {
  498. src_nents = sg_nents_for_len(req->src, req->assoclen +
  499. req->cryptlen +
  500. (encrypt ? authsize : 0));
  501. if (unlikely(src_nents < 0)) {
  502. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  503. req->assoclen + req->cryptlen +
  504. (encrypt ? authsize : 0));
  505. qi_cache_free(edesc);
  506. return ERR_PTR(src_nents);
  507. }
  508. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  509. DMA_BIDIRECTIONAL);
  510. if (unlikely(!mapped_src_nents)) {
  511. dev_err(qidev, "unable to map source\n");
  512. qi_cache_free(edesc);
  513. return ERR_PTR(-ENOMEM);
  514. }
  515. } else {
  516. src_nents = sg_nents_for_len(req->src, req->assoclen +
  517. req->cryptlen);
  518. if (unlikely(src_nents < 0)) {
  519. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  520. req->assoclen + req->cryptlen);
  521. qi_cache_free(edesc);
  522. return ERR_PTR(src_nents);
  523. }
  524. dst_nents = sg_nents_for_len(req->dst, req->assoclen +
  525. req->cryptlen +
  526. (encrypt ? authsize :
  527. (-authsize)));
  528. if (unlikely(dst_nents < 0)) {
  529. dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
  530. req->assoclen + req->cryptlen +
  531. (encrypt ? authsize : (-authsize)));
  532. qi_cache_free(edesc);
  533. return ERR_PTR(dst_nents);
  534. }
  535. if (src_nents) {
  536. mapped_src_nents = dma_map_sg(qidev, req->src,
  537. src_nents, DMA_TO_DEVICE);
  538. if (unlikely(!mapped_src_nents)) {
  539. dev_err(qidev, "unable to map source\n");
  540. qi_cache_free(edesc);
  541. return ERR_PTR(-ENOMEM);
  542. }
  543. } else {
  544. mapped_src_nents = 0;
  545. }
  546. mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
  547. DMA_FROM_DEVICE);
  548. if (unlikely(!mapped_dst_nents)) {
  549. dev_err(qidev, "unable to map destination\n");
  550. dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
  551. qi_cache_free(edesc);
  552. return ERR_PTR(-ENOMEM);
  553. }
  554. }
  555. if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
  556. ivsize = crypto_aead_ivsize(aead);
  557. /*
  558. * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
  559. * Input is not contiguous.
  560. */
  561. qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
  562. (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
  563. sg_table = &edesc->sgt[0];
  564. qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
  565. if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
  566. CAAM_QI_MEMCACHE_SIZE)) {
  567. dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
  568. qm_sg_ents, ivsize);
  569. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  570. 0, 0, 0, 0);
  571. qi_cache_free(edesc);
  572. return ERR_PTR(-ENOMEM);
  573. }
  574. if (ivsize) {
  575. u8 *iv = (u8 *)(sg_table + qm_sg_ents);
  576. /* Make sure IV is located in a DMAable area */
  577. memcpy(iv, req->iv, ivsize);
  578. iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
  579. if (dma_mapping_error(qidev, iv_dma)) {
  580. dev_err(qidev, "unable to map IV\n");
  581. caam_unmap(qidev, req->src, req->dst, src_nents,
  582. dst_nents, 0, 0, 0, 0, 0);
  583. qi_cache_free(edesc);
  584. return ERR_PTR(-ENOMEM);
  585. }
  586. }
  587. edesc->src_nents = src_nents;
  588. edesc->dst_nents = dst_nents;
  589. edesc->iv_dma = iv_dma;
  590. edesc->drv_req.app_ctx = req;
  591. edesc->drv_req.cbk = aead_done;
  592. edesc->drv_req.drv_ctx = drv_ctx;
  593. edesc->assoclen = cpu_to_caam32(req->assoclen);
  594. edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
  595. DMA_TO_DEVICE);
  596. if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
  597. dev_err(qidev, "unable to map assoclen\n");
  598. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  599. iv_dma, ivsize, op_type, 0, 0);
  600. qi_cache_free(edesc);
  601. return ERR_PTR(-ENOMEM);
  602. }
  603. dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
  604. qm_sg_index++;
  605. if (ivsize) {
  606. dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
  607. qm_sg_index++;
  608. }
  609. sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
  610. qm_sg_index += mapped_src_nents;
  611. if (mapped_dst_nents > 1)
  612. sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
  613. qm_sg_index, 0);
  614. qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
  615. if (dma_mapping_error(qidev, qm_sg_dma)) {
  616. dev_err(qidev, "unable to map S/G table\n");
  617. dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
  618. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  619. iv_dma, ivsize, op_type, 0, 0);
  620. qi_cache_free(edesc);
  621. return ERR_PTR(-ENOMEM);
  622. }
  623. edesc->qm_sg_dma = qm_sg_dma;
  624. edesc->qm_sg_bytes = qm_sg_bytes;
  625. out_len = req->assoclen + req->cryptlen +
  626. (encrypt ? ctx->authsize : (-ctx->authsize));
  627. in_len = 4 + ivsize + req->assoclen + req->cryptlen;
  628. fd_sgt = &edesc->drv_req.fd_sgt[0];
  629. dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
  630. if (req->dst == req->src) {
  631. if (mapped_src_nents == 1)
  632. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
  633. out_len, 0);
  634. else
  635. dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
  636. (1 + !!ivsize) * sizeof(*sg_table),
  637. out_len, 0);
  638. } else if (mapped_dst_nents == 1) {
  639. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
  640. 0);
  641. } else {
  642. dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
  643. qm_sg_index, out_len, 0);
  644. }
  645. return edesc;
  646. }
  647. static inline int aead_crypt(struct aead_request *req, bool encrypt)
  648. {
  649. struct aead_edesc *edesc;
  650. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  651. struct caam_ctx *ctx = crypto_aead_ctx(aead);
  652. int ret;
  653. if (unlikely(caam_congested))
  654. return -EAGAIN;
  655. /* allocate extended descriptor */
  656. edesc = aead_edesc_alloc(req, encrypt);
  657. if (IS_ERR_OR_NULL(edesc))
  658. return PTR_ERR(edesc);
  659. /* Create and submit job descriptor */
  660. ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
  661. if (!ret) {
  662. ret = -EINPROGRESS;
  663. } else {
  664. aead_unmap(ctx->qidev, edesc, req);
  665. qi_cache_free(edesc);
  666. }
  667. return ret;
  668. }
  669. static int aead_encrypt(struct aead_request *req)
  670. {
  671. return aead_crypt(req, true);
  672. }
  673. static int aead_decrypt(struct aead_request *req)
  674. {
  675. return aead_crypt(req, false);
  676. }
  677. static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
  678. {
  679. struct ablkcipher_edesc *edesc;
  680. struct ablkcipher_request *req = drv_req->app_ctx;
  681. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  682. struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
  683. struct device *qidev = caam_ctx->qidev;
  684. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  685. #ifdef DEBUG
  686. dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
  687. #endif
  688. edesc = container_of(drv_req, typeof(*edesc), drv_req);
  689. if (status)
  690. caam_jr_strstatus(qidev, status);
  691. #ifdef DEBUG
  692. print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
  693. DUMP_PREFIX_ADDRESS, 16, 4, req->info,
  694. edesc->src_nents > 1 ? 100 : ivsize, 1);
  695. caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
  696. DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
  697. edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
  698. #endif
  699. ablkcipher_unmap(qidev, edesc, req);
  700. /* In case initial IV was generated, copy it in GIVCIPHER request */
  701. if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
  702. u8 *iv;
  703. struct skcipher_givcrypt_request *greq;
  704. greq = container_of(req, struct skcipher_givcrypt_request,
  705. creq);
  706. iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
  707. memcpy(greq->giv, iv, ivsize);
  708. }
  709. /*
  710. * The crypto API expects us to set the IV (req->info) to the last
  711. * ciphertext block. This is used e.g. by the CTS mode.
  712. */
  713. if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
  714. scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
  715. ivsize, ivsize, 0);
  716. qi_cache_free(edesc);
  717. ablkcipher_request_complete(req, status);
  718. }
  719. static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
  720. *req, bool encrypt)
  721. {
  722. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  723. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  724. struct device *qidev = ctx->qidev;
  725. gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  726. GFP_KERNEL : GFP_ATOMIC;
  727. int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
  728. struct ablkcipher_edesc *edesc;
  729. dma_addr_t iv_dma;
  730. u8 *iv;
  731. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  732. int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
  733. struct qm_sg_entry *sg_table, *fd_sgt;
  734. struct caam_drv_ctx *drv_ctx;
  735. enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
  736. drv_ctx = get_drv_ctx(ctx, op_type);
  737. if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
  738. return (struct ablkcipher_edesc *)drv_ctx;
  739. src_nents = sg_nents_for_len(req->src, req->nbytes);
  740. if (unlikely(src_nents < 0)) {
  741. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  742. req->nbytes);
  743. return ERR_PTR(src_nents);
  744. }
  745. if (unlikely(req->src != req->dst)) {
  746. dst_nents = sg_nents_for_len(req->dst, req->nbytes);
  747. if (unlikely(dst_nents < 0)) {
  748. dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
  749. req->nbytes);
  750. return ERR_PTR(dst_nents);
  751. }
  752. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  753. DMA_TO_DEVICE);
  754. if (unlikely(!mapped_src_nents)) {
  755. dev_err(qidev, "unable to map source\n");
  756. return ERR_PTR(-ENOMEM);
  757. }
  758. mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
  759. DMA_FROM_DEVICE);
  760. if (unlikely(!mapped_dst_nents)) {
  761. dev_err(qidev, "unable to map destination\n");
  762. dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
  763. return ERR_PTR(-ENOMEM);
  764. }
  765. } else {
  766. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  767. DMA_BIDIRECTIONAL);
  768. if (unlikely(!mapped_src_nents)) {
  769. dev_err(qidev, "unable to map source\n");
  770. return ERR_PTR(-ENOMEM);
  771. }
  772. }
  773. qm_sg_ents = 1 + mapped_src_nents;
  774. dst_sg_idx = qm_sg_ents;
  775. qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
  776. qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
  777. if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
  778. ivsize > CAAM_QI_MEMCACHE_SIZE)) {
  779. dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
  780. qm_sg_ents, ivsize);
  781. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  782. 0, 0, 0, 0);
  783. return ERR_PTR(-ENOMEM);
  784. }
  785. /* allocate space for base edesc, link tables and IV */
  786. edesc = qi_cache_alloc(GFP_DMA | flags);
  787. if (unlikely(!edesc)) {
  788. dev_err(qidev, "could not allocate extended descriptor\n");
  789. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  790. 0, 0, 0, 0);
  791. return ERR_PTR(-ENOMEM);
  792. }
  793. /* Make sure IV is located in a DMAable area */
  794. sg_table = &edesc->sgt[0];
  795. iv = (u8 *)(sg_table + qm_sg_ents);
  796. memcpy(iv, req->info, ivsize);
  797. iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
  798. if (dma_mapping_error(qidev, iv_dma)) {
  799. dev_err(qidev, "unable to map IV\n");
  800. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  801. 0, 0, 0, 0);
  802. qi_cache_free(edesc);
  803. return ERR_PTR(-ENOMEM);
  804. }
  805. edesc->src_nents = src_nents;
  806. edesc->dst_nents = dst_nents;
  807. edesc->iv_dma = iv_dma;
  808. edesc->qm_sg_bytes = qm_sg_bytes;
  809. edesc->drv_req.app_ctx = req;
  810. edesc->drv_req.cbk = ablkcipher_done;
  811. edesc->drv_req.drv_ctx = drv_ctx;
  812. dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
  813. sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
  814. if (mapped_dst_nents > 1)
  815. sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
  816. dst_sg_idx, 0);
  817. edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
  818. DMA_TO_DEVICE);
  819. if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
  820. dev_err(qidev, "unable to map S/G table\n");
  821. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  822. iv_dma, ivsize, op_type, 0, 0);
  823. qi_cache_free(edesc);
  824. return ERR_PTR(-ENOMEM);
  825. }
  826. fd_sgt = &edesc->drv_req.fd_sgt[0];
  827. dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
  828. ivsize + req->nbytes, 0);
  829. if (req->src == req->dst) {
  830. dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
  831. sizeof(*sg_table), req->nbytes, 0);
  832. } else if (mapped_dst_nents > 1) {
  833. dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
  834. sizeof(*sg_table), req->nbytes, 0);
  835. } else {
  836. dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
  837. req->nbytes, 0);
  838. }
  839. return edesc;
  840. }
  841. static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
  842. struct skcipher_givcrypt_request *creq)
  843. {
  844. struct ablkcipher_request *req = &creq->creq;
  845. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  846. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  847. struct device *qidev = ctx->qidev;
  848. gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
  849. GFP_KERNEL : GFP_ATOMIC;
  850. int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
  851. struct ablkcipher_edesc *edesc;
  852. dma_addr_t iv_dma;
  853. u8 *iv;
  854. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  855. struct qm_sg_entry *sg_table, *fd_sgt;
  856. int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
  857. struct caam_drv_ctx *drv_ctx;
  858. drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
  859. if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
  860. return (struct ablkcipher_edesc *)drv_ctx;
  861. src_nents = sg_nents_for_len(req->src, req->nbytes);
  862. if (unlikely(src_nents < 0)) {
  863. dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
  864. req->nbytes);
  865. return ERR_PTR(src_nents);
  866. }
  867. if (unlikely(req->src != req->dst)) {
  868. dst_nents = sg_nents_for_len(req->dst, req->nbytes);
  869. if (unlikely(dst_nents < 0)) {
  870. dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
  871. req->nbytes);
  872. return ERR_PTR(dst_nents);
  873. }
  874. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  875. DMA_TO_DEVICE);
  876. if (unlikely(!mapped_src_nents)) {
  877. dev_err(qidev, "unable to map source\n");
  878. return ERR_PTR(-ENOMEM);
  879. }
  880. mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
  881. DMA_FROM_DEVICE);
  882. if (unlikely(!mapped_dst_nents)) {
  883. dev_err(qidev, "unable to map destination\n");
  884. dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
  885. return ERR_PTR(-ENOMEM);
  886. }
  887. } else {
  888. mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
  889. DMA_BIDIRECTIONAL);
  890. if (unlikely(!mapped_src_nents)) {
  891. dev_err(qidev, "unable to map source\n");
  892. return ERR_PTR(-ENOMEM);
  893. }
  894. dst_nents = src_nents;
  895. mapped_dst_nents = src_nents;
  896. }
  897. qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
  898. dst_sg_idx = qm_sg_ents;
  899. qm_sg_ents += 1 + mapped_dst_nents;
  900. qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
  901. if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
  902. ivsize > CAAM_QI_MEMCACHE_SIZE)) {
  903. dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
  904. qm_sg_ents, ivsize);
  905. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  906. 0, 0, 0, 0);
  907. return ERR_PTR(-ENOMEM);
  908. }
  909. /* allocate space for base edesc, link tables and IV */
  910. edesc = qi_cache_alloc(GFP_DMA | flags);
  911. if (!edesc) {
  912. dev_err(qidev, "could not allocate extended descriptor\n");
  913. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  914. 0, 0, 0, 0);
  915. return ERR_PTR(-ENOMEM);
  916. }
  917. /* Make sure IV is located in a DMAable area */
  918. sg_table = &edesc->sgt[0];
  919. iv = (u8 *)(sg_table + qm_sg_ents);
  920. iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
  921. if (dma_mapping_error(qidev, iv_dma)) {
  922. dev_err(qidev, "unable to map IV\n");
  923. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
  924. 0, 0, 0, 0);
  925. qi_cache_free(edesc);
  926. return ERR_PTR(-ENOMEM);
  927. }
  928. edesc->src_nents = src_nents;
  929. edesc->dst_nents = dst_nents;
  930. edesc->iv_dma = iv_dma;
  931. edesc->qm_sg_bytes = qm_sg_bytes;
  932. edesc->drv_req.app_ctx = req;
  933. edesc->drv_req.cbk = ablkcipher_done;
  934. edesc->drv_req.drv_ctx = drv_ctx;
  935. if (mapped_src_nents > 1)
  936. sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
  937. dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
  938. sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
  939. 0);
  940. edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
  941. DMA_TO_DEVICE);
  942. if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
  943. dev_err(qidev, "unable to map S/G table\n");
  944. caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
  945. iv_dma, ivsize, GIVENCRYPT, 0, 0);
  946. qi_cache_free(edesc);
  947. return ERR_PTR(-ENOMEM);
  948. }
  949. fd_sgt = &edesc->drv_req.fd_sgt[0];
  950. if (mapped_src_nents > 1)
  951. dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
  952. 0);
  953. else
  954. dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
  955. req->nbytes, 0);
  956. dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
  957. sizeof(*sg_table), ivsize + req->nbytes, 0);
  958. return edesc;
  959. }
  960. static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
  961. {
  962. struct ablkcipher_edesc *edesc;
  963. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  964. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  965. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  966. int ret;
  967. if (unlikely(caam_congested))
  968. return -EAGAIN;
  969. /* allocate extended descriptor */
  970. edesc = ablkcipher_edesc_alloc(req, encrypt);
  971. if (IS_ERR(edesc))
  972. return PTR_ERR(edesc);
  973. /*
  974. * The crypto API expects us to set the IV (req->info) to the last
  975. * ciphertext block.
  976. */
  977. if (!encrypt)
  978. scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
  979. ivsize, ivsize, 0);
  980. ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
  981. if (!ret) {
  982. ret = -EINPROGRESS;
  983. } else {
  984. ablkcipher_unmap(ctx->qidev, edesc, req);
  985. qi_cache_free(edesc);
  986. }
  987. return ret;
  988. }
  989. static int ablkcipher_encrypt(struct ablkcipher_request *req)
  990. {
  991. return ablkcipher_crypt(req, true);
  992. }
  993. static int ablkcipher_decrypt(struct ablkcipher_request *req)
  994. {
  995. return ablkcipher_crypt(req, false);
  996. }
  997. static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
  998. {
  999. struct ablkcipher_request *req = &creq->creq;
  1000. struct ablkcipher_edesc *edesc;
  1001. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  1002. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  1003. int ret;
  1004. if (unlikely(caam_congested))
  1005. return -EAGAIN;
  1006. /* allocate extended descriptor */
  1007. edesc = ablkcipher_giv_edesc_alloc(creq);
  1008. if (IS_ERR(edesc))
  1009. return PTR_ERR(edesc);
  1010. ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
  1011. if (!ret) {
  1012. ret = -EINPROGRESS;
  1013. } else {
  1014. ablkcipher_unmap(ctx->qidev, edesc, req);
  1015. qi_cache_free(edesc);
  1016. }
  1017. return ret;
  1018. }
  1019. #define template_ablkcipher template_u.ablkcipher
  1020. struct caam_alg_template {
  1021. char name[CRYPTO_MAX_ALG_NAME];
  1022. char driver_name[CRYPTO_MAX_ALG_NAME];
  1023. unsigned int blocksize;
  1024. u32 type;
  1025. union {
  1026. struct ablkcipher_alg ablkcipher;
  1027. } template_u;
  1028. u32 class1_alg_type;
  1029. u32 class2_alg_type;
  1030. };
  1031. static struct caam_alg_template driver_algs[] = {
  1032. /* ablkcipher descriptor */
  1033. {
  1034. .name = "cbc(aes)",
  1035. .driver_name = "cbc-aes-caam-qi",
  1036. .blocksize = AES_BLOCK_SIZE,
  1037. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  1038. .template_ablkcipher = {
  1039. .setkey = ablkcipher_setkey,
  1040. .encrypt = ablkcipher_encrypt,
  1041. .decrypt = ablkcipher_decrypt,
  1042. .givencrypt = ablkcipher_givencrypt,
  1043. .geniv = "<built-in>",
  1044. .min_keysize = AES_MIN_KEY_SIZE,
  1045. .max_keysize = AES_MAX_KEY_SIZE,
  1046. .ivsize = AES_BLOCK_SIZE,
  1047. },
  1048. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1049. },
  1050. {
  1051. .name = "cbc(des3_ede)",
  1052. .driver_name = "cbc-3des-caam-qi",
  1053. .blocksize = DES3_EDE_BLOCK_SIZE,
  1054. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  1055. .template_ablkcipher = {
  1056. .setkey = ablkcipher_setkey,
  1057. .encrypt = ablkcipher_encrypt,
  1058. .decrypt = ablkcipher_decrypt,
  1059. .givencrypt = ablkcipher_givencrypt,
  1060. .geniv = "<built-in>",
  1061. .min_keysize = DES3_EDE_KEY_SIZE,
  1062. .max_keysize = DES3_EDE_KEY_SIZE,
  1063. .ivsize = DES3_EDE_BLOCK_SIZE,
  1064. },
  1065. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  1066. },
  1067. {
  1068. .name = "cbc(des)",
  1069. .driver_name = "cbc-des-caam-qi",
  1070. .blocksize = DES_BLOCK_SIZE,
  1071. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  1072. .template_ablkcipher = {
  1073. .setkey = ablkcipher_setkey,
  1074. .encrypt = ablkcipher_encrypt,
  1075. .decrypt = ablkcipher_decrypt,
  1076. .givencrypt = ablkcipher_givencrypt,
  1077. .geniv = "<built-in>",
  1078. .min_keysize = DES_KEY_SIZE,
  1079. .max_keysize = DES_KEY_SIZE,
  1080. .ivsize = DES_BLOCK_SIZE,
  1081. },
  1082. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  1083. },
  1084. {
  1085. .name = "ctr(aes)",
  1086. .driver_name = "ctr-aes-caam-qi",
  1087. .blocksize = 1,
  1088. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  1089. .template_ablkcipher = {
  1090. .setkey = ablkcipher_setkey,
  1091. .encrypt = ablkcipher_encrypt,
  1092. .decrypt = ablkcipher_decrypt,
  1093. .geniv = "chainiv",
  1094. .min_keysize = AES_MIN_KEY_SIZE,
  1095. .max_keysize = AES_MAX_KEY_SIZE,
  1096. .ivsize = AES_BLOCK_SIZE,
  1097. },
  1098. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  1099. },
  1100. {
  1101. .name = "rfc3686(ctr(aes))",
  1102. .driver_name = "rfc3686-ctr-aes-caam-qi",
  1103. .blocksize = 1,
  1104. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  1105. .template_ablkcipher = {
  1106. .setkey = ablkcipher_setkey,
  1107. .encrypt = ablkcipher_encrypt,
  1108. .decrypt = ablkcipher_decrypt,
  1109. .givencrypt = ablkcipher_givencrypt,
  1110. .geniv = "<built-in>",
  1111. .min_keysize = AES_MIN_KEY_SIZE +
  1112. CTR_RFC3686_NONCE_SIZE,
  1113. .max_keysize = AES_MAX_KEY_SIZE +
  1114. CTR_RFC3686_NONCE_SIZE,
  1115. .ivsize = CTR_RFC3686_IV_SIZE,
  1116. },
  1117. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  1118. },
  1119. {
  1120. .name = "xts(aes)",
  1121. .driver_name = "xts-aes-caam-qi",
  1122. .blocksize = AES_BLOCK_SIZE,
  1123. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  1124. .template_ablkcipher = {
  1125. .setkey = xts_ablkcipher_setkey,
  1126. .encrypt = ablkcipher_encrypt,
  1127. .decrypt = ablkcipher_decrypt,
  1128. .geniv = "eseqiv",
  1129. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  1130. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  1131. .ivsize = AES_BLOCK_SIZE,
  1132. },
  1133. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
  1134. },
  1135. };
  1136. static struct caam_aead_alg driver_aeads[] = {
  1137. /* single-pass ipsec_esp descriptor */
  1138. {
  1139. .aead = {
  1140. .base = {
  1141. .cra_name = "authenc(hmac(md5),cbc(aes))",
  1142. .cra_driver_name = "authenc-hmac-md5-"
  1143. "cbc-aes-caam-qi",
  1144. .cra_blocksize = AES_BLOCK_SIZE,
  1145. },
  1146. .setkey = aead_setkey,
  1147. .setauthsize = aead_setauthsize,
  1148. .encrypt = aead_encrypt,
  1149. .decrypt = aead_decrypt,
  1150. .ivsize = AES_BLOCK_SIZE,
  1151. .maxauthsize = MD5_DIGEST_SIZE,
  1152. },
  1153. .caam = {
  1154. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1155. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  1156. OP_ALG_AAI_HMAC_PRECOMP,
  1157. }
  1158. },
  1159. {
  1160. .aead = {
  1161. .base = {
  1162. .cra_name = "echainiv(authenc(hmac(md5),"
  1163. "cbc(aes)))",
  1164. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  1165. "cbc-aes-caam-qi",
  1166. .cra_blocksize = AES_BLOCK_SIZE,
  1167. },
  1168. .setkey = aead_setkey,
  1169. .setauthsize = aead_setauthsize,
  1170. .encrypt = aead_encrypt,
  1171. .decrypt = aead_decrypt,
  1172. .ivsize = AES_BLOCK_SIZE,
  1173. .maxauthsize = MD5_DIGEST_SIZE,
  1174. },
  1175. .caam = {
  1176. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  1177. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  1178. OP_ALG_AAI_HMAC_PRECOMP,
  1179. .geniv = true,
  1180. }
  1181. },
  1182. {
  1183. .aead = {
  1184. .base = {
  1185. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  1186. .cra_driver_name = "authenc-hmac-sha1-"
  1187. "cbc-aes-caam-qi",
  1188. .cra_blocksize = AES_BLOCK_SIZE,
  1189. },
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		}
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		}
	},
};
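
/*
 * Wrapper for the non-AEAD algorithms built from the driver_algs templates:
 * each registered crypto_alg carries its CAAM algorithm-type info and is
 * linked on alg_list so it can be unregistered and freed at module exit.
 */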
struct caam_crypto_alg {
	struct list_head entry;
	struct crypto_alg crypto_alg;
	struct caam_alg_entry caam;
};
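
/*
 * Per-tfm setup shared by the ablkcipher and AEAD init paths: allocate a job
 * ring device for the transform, DMA-map the context key storage and record
 * the class 1/2 descriptor header template values for later descriptor
 * construction.
 */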
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
	struct caam_drv_private *priv;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
				      DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
		dev_err(ctx->jrdev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	priv = dev_get_drvdata(ctx->jrdev->parent);
	ctx->qidev = priv->qidev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;
	ctx->drv_ctx[GIVENCRYPT] = NULL;

	return 0;
}
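
/*
 * The crypto_alg (.cra_init) and aead_alg (.init) entry points both funnel
 * into caam_init_common(); they differ only in how the CAAM-specific entry is
 * recovered from the registered algorithm via container_of().
 */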
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
							crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}

static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	return caam_init_common(ctx, &caam_alg->caam);
}
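
/*
 * Teardown mirrors caam_init_common(): release any driver contexts created
 * for the ENCRYPT/DECRYPT/GIVENCRYPT operations, unmap the key buffer and
 * return the job ring device.
 */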
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);

	dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
			 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	caam_exit_common(crypto_tfm_ctx(tfm));
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx(tfm));
}
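
/*
 * alg_list tracks the dynamically allocated caam_crypto_alg instances; module
 * exit unregisters those as well as every driver_aeads entry that was
 * successfully registered at init time.
 */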
static struct list_head alg_list;

static void __exit caam_qi_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}
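
/*
 * Allocate and populate a crypto_alg wrapper from an ablkcipher/givcipher
 * template; the caller registers the algorithm and links the wrapper on
 * alg_list.
 */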
static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_GIVCIPHER:
		alg->cra_type = &crypto_givcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	}

	t_alg->caam.class1_alg_type = template->class1_alg_type;
	t_alg->caam.class2_alg_type = template->class2_alg_type;

	return t_alg;
}
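
/* Fill in the common aead_alg fields for an entry taken from driver_aeads */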
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
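
/*
 * Module init: locate the CAAM controller node, read which CHAs (DES, AES,
 * MD) are instantiated and register only the algorithms the hardware can
 * actually back.
 */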
static int __init caam_qi_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	int i = 0, err = 0;
	u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	of_node_put(dev_node);
	if (!pdev)
		return -ENODEV;

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv || !priv->qi_present)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
	cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
	des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
	aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
	md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_crypto_alg *t_alg;
		struct caam_alg_template *alg = driver_algs + i;
		u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		t_alg = caam_alg_alloc(alg);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(priv->qidev, "%s alg allocation failed\n",
				 alg->driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			dev_warn(priv->qidev, "%s alg registration failed\n",
				 t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
			continue;
		}

		list_add_tail(&t_alg->entry, &alg_list);
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
		    (alg_aai == OP_ALG_AAI_GCM))
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_qi_algapi_init);
module_exit(caam_qi_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
MODULE_AUTHOR("Freescale Semiconductor");