caamalg.c

/*
 * caam - Freescale FSL CAAM support for crypto API
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 *
 * Based on talitos crypto API driver.
 *
 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
 *
 * ---------------                     ---------------
 * | JobDesc #1  |-------------------->| ShareDesc   |
 * | *(packet 1) |          |--------->| (PDB)       |
 * ---------------          |    |---->| (hashKey)   |
 *       .                  |    |     | (cipherKey) |
 *       .                  |    |     | (operation) |
 * ---------------          |    |     ---------------
 * | JobDesc #2  |----------|    |
 * | *(packet 2) |               |
 * ---------------               |
 *       .                       |
 *       .                       |
 * ---------------               |
 * | JobDesc #3  |---------------|
 * | *(packet 3) |
 * ---------------
 *
 * The SharedDesc never changes for a connection unless rekeyed, but
 * each packet will likely be in a different place. So all we need
 * to know to process the packet is where the input is, where the
 * output goes, and what context we want to process with. Context is
 * in the SharedDesc, packet references in the JobDesc.
 *
 * So, a job desc looks like:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * | (output length)   |
 * | SEQ_IN_PTR        |
 * | (input buffer)    |
 * | (input length)    |
 * ---------------------
 */
#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
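
/*
 * Illustrative only (not part of the driver): a minimal sketch of the job
 * descriptor layout described in the header comment above, built with the
 * desc_constr.h helpers.  The function name and its arguments are made up
 * for illustration; the real job descriptors are built later in this file
 * when requests are processed, and the exact header flags may differ.
 */
static inline void example_job_desc_sketch(u32 *desc, dma_addr_t sh_desc_dma,
					   int sh_desc_len,
					   dma_addr_t src_dma, u32 src_len,
					   dma_addr_t dst_dma, u32 dst_len)
{
	/* Header + pointer to the (reused) shared descriptor */
	init_job_desc_shared(desc, sh_desc_dma, sh_desc_len,
			     HDR_SHARE_DEFER | HDR_REVERSE);
	/* SEQ_OUT_PTR: where this packet's output goes */
	append_seq_out_ptr(desc, dst_dma, dst_len, 0);
	/* SEQ_IN_PTR: where this packet's input comes from */
	append_seq_in_ptr(desc, src_dma, src_len, 0);
}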
/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		3000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 CTR_RFC3686_NONCE_SIZE + \
					 SHA512_DIGEST_SIZE * 2)
/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
#define CAAM_MAX_IV_LENGTH		16

#define AEAD_DESC_JOB_IO_LEN		(DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2)
#define GCM_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 4)
#define AUTHENC_DESC_JOB_IO_LEN		(AEAD_DESC_JOB_IO_LEN + \
					 CAAM_CMD_SZ * 5)
/* length of descriptors text */
#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 9 * CAAM_CMD_SZ)

/* Note: Nonce is counted in enckeylen */
#define DESC_AEAD_CTR_RFC3686_LEN	(4 * CAAM_CMD_SZ)

#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 11 * CAAM_CMD_SZ)
#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 13 * CAAM_CMD_SZ)

#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 16 * CAAM_CMD_SZ)
#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 12 * CAAM_CMD_SZ)

#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)
#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 13 * CAAM_CMD_SZ)

#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 11 * CAAM_CMD_SZ)
#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 12 * CAAM_CMD_SZ)

#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
					 20 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
					 15 * CAAM_CMD_SZ)

#define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
#ifdef DEBUG
/* for print_hex_dumps with line references */
#define debug(format, arg...) printk(format, arg)
#else
#define debug(format, arg...)
#endif
#ifdef DEBUG
#include <linux/highmem.h>

static void dbg_dump_sg(const char *level, const char *prefix_str,
			int prefix_type, int rowsize, int groupsize,
			struct scatterlist *sg, size_t tlen, bool ascii,
			bool may_sleep)
{
	struct scatterlist *it;
	void *it_page;
	size_t len;
	void *buf;

	/* advance with sg_next(it) so the walk actually makes progress */
	for (it = sg; it != NULL && tlen > 0; it = sg_next(it)) {
		/*
		 * make sure the scatterlist's page
		 * has a valid virtual memory mapping
		 */
		it_page = kmap_atomic(sg_page(it));
		if (unlikely(!it_page)) {
			printk(KERN_ERR "dbg_dump_sg: kmap failed\n");
			return;
		}

		buf = it_page + it->offset;
		len = min_t(size_t, tlen, it->length);
		print_hex_dump(level, prefix_str, prefix_type, rowsize,
			       groupsize, buf, len, ascii);
		tlen -= len;

		kunmap_atomic(it_page);
	}
}
#endif
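
/*
 * Illustrative only: a dbg_dump_sg() call mirrors the print_hex_dump()
 * arguments, followed by the scatterlist, the total number of bytes to dump
 * and whether the kmap may sleep, e.g. (the scatterlist, length and
 * may_sleep values below are made up for illustration):
 *
 *	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__)": ",
 *		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
 *		    req->assoclen + req->cryptlen, 1, false);
 */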
static struct list_head alg_list;

struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	bool rfc3686;
	bool geniv;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};
/* Set DK bit in class 1 operation if shared */
static inline void append_dec_op1(u32 *desc, u32 type)
{
	u32 *jump_cmd, *uncond_jump_cmd;

	/* DK bit is valid only for AES */
	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
		append_operation(desc, type | OP_ALG_AS_INITFINAL |
				 OP_ALG_DECRYPT);
		return;
	}

	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT);
	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);
	append_operation(desc, type | OP_ALG_AS_INITFINAL |
			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
	set_jump_tgt_here(desc, uncond_jump_cmd);
}
/*
 * For aead functions, read payload and write payload,
 * both of which are specified in req->src and req->dst
 */
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
}
/*
 * For ablkcipher encrypt and decrypt, read from req->src and
 * write to req->dst
 */
static inline void ablkcipher_append_src_dst(u32 *desc)
{
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
	dma_addr_t sh_desc_enc_dma;
	dma_addr_t sh_desc_dec_dma;
	dma_addr_t sh_desc_givenc_dma;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	unsigned int enckeylen;
	unsigned int split_key_len;
	unsigned int split_key_pad_len;
	unsigned int authsize;
};
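
/*
 * Layout of ctx->key as consumed by append_key_aead() below (illustrative,
 * derived from the code that follows):
 *
 *	+-------------------------------+----------------+---------------+
 *	| MDHA split authentication key | encryption key | RFC3686 nonce |
 *	| (padded to split_key_pad_len) | (enckeylen,    | (rfc3686      |
 *	|                               |  which counts  |  algorithms   |
 *	|                               |  the nonce)    |  only)        |
 *	+-------------------------------+----------------+---------------+
 *
 * key_dma is the DMA mapping of this buffer, referenced when the keys do
 * not fit inline in the shared descriptor.
 */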
static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
			    int keys_fit_inline, bool is_rfc3686)
{
	u32 *nonce;
	unsigned int enckeylen = ctx->enckeylen;

	/*
	 * RFC3686 specific:
	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
	 *	| enckeylen = encryption key size + nonce size
	 */
	if (is_rfc3686)
		enckeylen -= CTR_RFC3686_NONCE_SIZE;

	if (keys_fit_inline) {
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key_as_imm(desc, (void *)ctx->key +
				  ctx->split_key_pad_len, enckeylen,
				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else {
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
				enckeylen);
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc,
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
}
static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
				  int keys_fit_inline, bool is_rfc3686)
{
	u32 *key_jump_cmd;

	/* Note: Context registers are saved. */
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	set_jump_tgt_here(desc, key_jump_cmd);
}
static int aead_null_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
	u32 *desc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	/* aead_decrypt shared descriptor */
	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
				  ctx->split_key_len, CLASS_2 |
				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
	else
		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* assoclen + cryptlen = seqoutlen */
	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	/* Prepare to read and write cryptlen + assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
				    MOVE_DEST_MATH2 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
				     MOVE_DEST_DESCBUF |
				     MOVE_WAITCOMP |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Read and write cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/*
	 * Insert a NOP here, since we need at least 4 instructions between
	 * code patching the descriptor buffer and the location being patched.
	 */
	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
	set_jump_tgt_here(desc, jump_cmd);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
		    MOVE_AUX_LS);
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "aead null dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
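
/*
 * The shared-descriptor builders above and below all repeat the same budget
 * check: keys are placed inline only if the descriptor text, the job
 * descriptor I/O commands and the key material all fit in the 64-word
 * descriptor buffer.  A hypothetical helper capturing that check (not used
 * by the driver, shown only to make the recurring pattern explicit):
 */
static inline bool caam_keys_fit_inline(unsigned int desc_text_len,
					unsigned int job_io_len,
					unsigned int key_bytes)
{
	/* all lengths are in bytes; CAAM_DESC_BYTES_MAX is the h/w limit */
	return desc_text_len + job_io_len + key_bytes <= CAAM_DESC_BYTES_MAX;
}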
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline;
	u32 geniv, moveiv;
	u32 ctx1_iv_off = 0;
	u32 *desc;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->authsize)
		return 0;

	/* NULL encryption / decryption */
	if (!ctx->enckeylen)
		return aead_null_set_sh_desc(aead);

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	if (alg->caam.geniv)
		goto skip_enc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     FIFOLDST_VLF);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_enc:
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	/* Class 2 operation */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	if (alg->caam.geniv)
		append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
	else
		append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	if (alg->caam.geniv) {
		append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT |
				(ctx1_iv_off << LDST_OFFSET_SHIFT));
		append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
			    (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize);
	}

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Read and write cryptlen bytes */
	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);

	/* Load ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	if (!alg->caam.geniv)
		goto skip_givenc;

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
	    ctx->split_key_pad_len + ctx->enckeylen +
	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
	    CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	/* aead_givencrypt shared descriptor */
	desc = ctx->sh_desc_enc;

	/* Note: Context registers are saved. */
	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);

	if (is_rfc3686)
		goto copy_iv;

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

copy_iv:
	/* Copy IV to class 1 context */
	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
		    (ivsize << MOVE_LEN_SHIFT));

	/* Return to encryption */
	append_operation(desc, ctx->class2_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Read and write assoclen bytes */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* ivsize + cryptlen = seqoutlen - authsize */
	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc before reading payload */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
			     KEY_VLF);

	/* Copy iv from outfifo to class 2 fifo */
	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
		 NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB |
			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Will write ivsize + cryptlen */
	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* No need to reload iv */
	append_seq_fifo_load(desc, ivsize,
			     FIFOLD_CLASS_SKIP);

	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

skip_givenc:
	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd, *zero_payload_jump_cmd,
	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD | JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* if assoclen + cryptlen is ZERO, skip to ICV write */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqinlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ);

	/* if cryptlen is ZERO jump to zero-payload commands */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* jump the zero-payload commands */
	append_jump(desc, JUMP_TEST_ALL | 2);

	/* zero-payload commands */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);

	/* There is no input data */
	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);

	/* write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* skip key loading if they are loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD |
				   JUMP_COND_SELF);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	/* if assoclen is ZERO, skip reading the assoc data */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
					   JUMP_COND_MATH_Z);

	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* jump to zero-payload command if cryptlen is zero */
	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
					    JUMP_COND_MATH_Z);

	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* store encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* zero-payload command */
	set_jump_tgt_here(desc, zero_payload_jump_cmd);

	/* read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
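
/*
 * Illustrative only (not part of the driver): how the callbacks above are
 * reached through the generic kernel AEAD API.  Assuming the CAAM "gcm(aes)"
 * implementation wins the priority selection, crypto_aead_setkey() and
 * crypto_aead_setauthsize() land in this driver's setkey/setauthsize
 * callbacks, which rebuild the shared descriptors for the new parameters.
 * The function and key below are hypothetical.
 */
static int __maybe_unused example_gcm_session_setup(void)
{
	static const u8 key[16] = { 0 };	/* placeholder key */
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, 16);

	crypto_free_aead(tfm);
	return ret;
}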
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* cryptlen = seqoutlen - assoclen */
	append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ);

	/* Write encrypted data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read payload data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);

	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);

	/* Read assoc data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);

	/* Skip IV */
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Will read cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ);

	/* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG);

	/* Skip assoc data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);

	/* Will write cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);

	/* Store payload data */
	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);

	/* Read encrypted data */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);

	/* Read ICV */
	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return 0;
}
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool keys_fit_inline = false;
	u32 *key_jump_cmd;
	u32 *read_move_cmd, *write_move_cmd;
	u32 *desc;

	if (!ctx->enckeylen || !ctx->authsize)
		return 0;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_enc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Class 1 operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* assoclen + cryptlen = seqinlen */
	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);

	/*
	 * MOVE_LEN opcode is not available in all SEC HW revisions,
	 * thus need to do some magic, i.e. self-patch the descriptor
	 * buffer.
	 */
	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
				    (0x6 << MOVE_LEN_SHIFT));
	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
				     (0x8 << MOVE_LEN_SHIFT));

	/* Will read assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Will write assoclen + cryptlen bytes */
	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read and write assoclen + cryptlen bytes */
	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);

	set_move_tgt_here(desc, read_move_cmd);
	set_move_tgt_here(desc, write_move_cmd);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	/* Move payload data to OFIFO */
	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);

	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	keys_fit_inline = false;
	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
		keys_fit_inline = true;

	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Skip key loading if it is loaded due to sharing */
	key_jump_cmd = append_jump(desc, JUMP_JSL |
				   JUMP_TEST_ALL | JUMP_COND_SHRD);
	if (keys_fit_inline)
		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
	else
		append_key(desc, ctx->key_dma, ctx->enckeylen,
			   CLASS_1 | KEY_DEST_CLASS_REG);
  1035. set_jump_tgt_here(desc, key_jump_cmd);
  1036. /* Class 1 operation */
  1037. append_operation(desc, ctx->class1_alg_type |
  1038. OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
  1039. /* assoclen + cryptlen = seqoutlen */
  1040. append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
  1041. /*
  1042. * MOVE_LEN opcode is not available in all SEC HW revisions,
  1043. * thus need to do some magic, i.e. self-patch the descriptor
  1044. * buffer.
  1045. */
  1046. read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
  1047. (0x6 << MOVE_LEN_SHIFT));
  1048. write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
  1049. (0x8 << MOVE_LEN_SHIFT));
  1050. /* Will read assoclen + cryptlen bytes */
  1051. append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
  1052. /* Will write assoclen + cryptlen bytes */
  1053. append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
  1054. /* Store payload data */
  1055. append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
  1056. /* In-snoop assoclen + cryptlen data */
  1057. append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
  1058. FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
  1059. set_move_tgt_here(desc, read_move_cmd);
  1060. set_move_tgt_here(desc, write_move_cmd);
  1061. append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
  1062. /* Move payload data to OFIFO */
  1063. append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
  1064. append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
  1065. /* Read ICV */
  1066. append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
  1067. FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
  1068. ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
  1069. desc_bytes(desc),
  1070. DMA_TO_DEVICE);
  1071. if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
  1072. dev_err(jrdev, "unable to map shared descriptor\n");
  1073. return -ENOMEM;
  1074. }
  1075. #ifdef DEBUG
  1076. print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
  1077. DUMP_PREFIX_ADDRESS, 16, 4, desc,
  1078. desc_bytes(desc), 1);
  1079. #endif
  1080. return 0;
  1081. }
static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}
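
/*
 * Derive the class 2 (MDHA) split key from the raw authentication key:
 * gen_split_key() runs a key-generation job on this tfm's job ring and
 * leaves the padded split key at the start of ctx->key.
 */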
static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
			      u32 authkeylen)
{
	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
			     ctx->split_key_pad_len, key_in, authkeylen,
			     ctx->alg_op);
}
static int aead_setkey(struct crypto_aead *aead,
		       const u8 *key, unsigned int keylen)
{
	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	/* Pick class 2 key length from algorithm submask */
	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
				      OP_ALG_ALGSEL_SHIFT] * 2;
	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);

	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

#ifdef DEBUG
	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
	       keys.authkeylen);
	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
	       ctx->split_key_len, ctx->split_key_pad_len);
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
	if (ret) {
		goto badkey;
	}

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
				      keys.enckeylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
		       ctx->split_key_pad_len + keys.enckeylen, 1);
#endif

	ctx->enckeylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
				 keys.enckeylen, DMA_TO_DEVICE);
	}

	return ret;
badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4106_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;

	if (keylen < 4)
		return -EINVAL;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif

	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->enckeylen = keylen - 4;

	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}

	ret = rfc4543_set_sh_desc(aead);
	if (ret) {
		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
				 DMA_TO_DEVICE);
	}

	return ret;
}
static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
			     const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct device *jrdev = ctx->jrdev;
	int ret = 0;
	u32 *key_jump_cmd;
	u32 *desc;
	u8 *nonce;
	u32 geniv;
	u32 ctx1_iv_off = 0;
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = (ctr_mode &&
				 (strstr(alg_name, "rfc3686") != NULL));

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		keylen -= CTR_RFC3686_NONCE_SIZE;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* Load iv */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}

	set_jump_tgt_here(desc, key_jump_cmd);

	/* load IV */
	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	/* Choose operation */
	if (ctr_mode)
		append_operation(desc, ctx->class1_alg_type |
				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
	else
		append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
					      desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif
	/* ablkcipher_givencrypt shared descriptor */
	desc = ctx->sh_desc_givenc;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 |
			  KEY_DEST_CLASS_REG);

	/* Load Nonce into CONTEXT1 reg */
	if (is_rfc3686) {
		nonce = (u8 *)key + keylen;
		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
				   LDST_CLASS_IND_CCB |
				   LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
		append_move(desc, MOVE_WAITCOMP |
			    MOVE_SRC_OUTFIFO |
			    MOVE_DEST_CLASS1CTX |
			    (16 << MOVE_OFFSET_SHIFT) |
			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
	}
	set_jump_tgt_here(desc, key_jump_cmd);

	/* Generate IV */
	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
	append_move(desc, MOVE_WAITCOMP |
		    MOVE_SRC_INFIFO |
		    MOVE_DEST_CLASS1CTX |
		    (crt->ivsize << MOVE_LEN_SHIFT) |
		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);

	/* Copy generated IV to memory */
	append_seq_store(desc, crt->ivsize,
			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
			 (ctx1_iv_off << LDST_OFFSET_SHIFT));

	/* Load Counter into CONTEXT1 reg */
	if (is_rfc3686)
		append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
				     LDST_SRCDST_BYTE_CONTEXT |
				     ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
				      LDST_OFFSET_SHIFT));

	if (ctx1_iv_off)
		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
			    (1 << JUMP_OFFSET_SHIFT));

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type |
			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
						 desc_bytes(desc),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
		       desc_bytes(desc), 1);
#endif

	return ret;
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
				 const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	u32 *key_jump_cmd, *desc;
	__be64 sector_size = cpu_to_be64(512);

	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(ablkcipher,
					    CRYPTO_TFM_RES_BAD_KEY_LEN);
		dev_err(jrdev, "key size mismatch\n");
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->key_dma)) {
		dev_err(jrdev, "unable to map key i/o memory\n");
		return -ENOMEM;
	}
	ctx->enckeylen = keylen;

	/* xts_ablkcipher_encrypt shared descriptor */
	desc = ctx->sh_desc_enc;
	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 keys only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
			 OP_ALG_ENCRYPT);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	/* xts_ablkcipher_decrypt shared descriptor */
	desc = ctx->sh_desc_dec;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
	/* Skip if already shared */
	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
				   JUMP_COND_SHRD);

	/* Load class1 key only */
	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);

	/* Load sector size with index 40 bytes (0x28) */
	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x28 << LDST_OFFSET_SHIFT) | 8);
	append_data(desc, (void *)&sector_size, 8);

	set_jump_tgt_here(desc, key_jump_cmd);

	/*
	 * create sequence for loading the sector index
	 * Upper 8B of IV - will be used as sector index
	 * Lower 8B of IV - will be discarded
	 */
	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
		   LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT) | 8);
	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);

	/* Load operation */
	append_dec_op1(desc, ctx->class1_alg_type);

	/* Perform operation */
	ablkcipher_append_src_dst(desc);

	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					      DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
		dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif

	return 0;
}
/*
 * aead_edesc - s/w-extended aead descriptor
 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct aead_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[];
};

/*
 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
 * @sec4_sg_bytes: length of dma mapped sec4_sg space
 * @sec4_sg_dma: bus physical mapped address of h/w link table
 * @hw_desc: the h/w job descriptor followed by any referenced link tables
 */
struct ablkcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int sec4_sg_bytes;
	dma_addr_t sec4_sg_dma;
	struct sec4_sg_entry *sec4_sg;
	u32 hw_desc[0];
};
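
/*
 * Undo the DMA mappings made for a request: the source and destination
 * scatterlists, the IV buffer (if one was mapped) and the sec4 S/G link
 * table (if one was built).
 */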
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents,
		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
		       int sec4_sg_bytes)
{
	if (dst != src) {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_TO_DEVICE);
		dma_unmap_sg(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents ? : 1, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	if (sec4_sg_bytes)
		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents, 0, 0,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}

static void ablkcipher_unmap(struct device *dev,
			     struct ablkcipher_edesc *edesc,
			     struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

	caam_unmap(dev, req->src, req->dst,
		   edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize,
		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	kfree(edesc);

	aead_request_complete(req, err);
}

static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
			      void *context)
{
	struct aead_request *req = context;
	struct aead_edesc *edesc;

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = container_of(desc, struct aead_edesc, hw_desc[0]);

	if (err)
		caam_jr_strstatus(jrdev, err);

	aead_unmap(jrdev, edesc, req);

	/*
	 * verify hw auth check passed else return -EBADMSG
	 */
	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       edesc->src_nents > 1 ? 100 : ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block. This is used e.g. by the CTS mode.
	 */
	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
				 ivsize, 0);

	kfree(edesc);

	ablkcipher_request_complete(req, err);
}

static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
				    void *context)
{
	struct ablkcipher_request *req = context;
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

#ifdef DEBUG
	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif

	edesc = (struct ablkcipher_edesc *)((char *)desc -
		 offsetof(struct ablkcipher_edesc, hw_desc));
	if (err)
		caam_jr_strstatus(jrdev, err);

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		    edesc->dst_nents > 1 ? 100 : req->nbytes, 1, true);
#endif

	ablkcipher_unmap(jrdev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->info) to the last
	 * ciphertext block.
	 */
	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
				 ivsize, 0);

	kfree(edesc);

	ablkcipher_request_complete(req, err);
}
/*
 * Fill in aead job descriptor
 */
static void init_aead_job(struct aead_request *req,
			  struct aead_edesc *edesc,
			  bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int authsize = ctx->authsize;
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;
	dma_addr_t ptr;
	u32 *sh_desc;

	sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
	ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (all_contig) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}

	append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen,
			  in_options);
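
	/*
	 * Default to in-place operation: reuse the input pointer and S/G
	 * options for the output sequence; override below when the
	 * destination differs from the source.
	 */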
	dst_dma = src_dma;
	out_options = in_options;

	if (unlikely(req->src != req->dst)) {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				  sec4_sg_index *
				  sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}

	if (encrypt)
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen + authsize,
				   out_options);
	else
		append_seq_out_ptr(desc, dst_dma,
				   req->assoclen + req->cryptlen - authsize,
				   out_options);

	/* REG3 = assoclen */
	append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
}
static void init_gcm_job(struct aead_request *req,
			 struct aead_edesc *edesc,
			 bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 *desc = edesc->hw_desc;
	bool generic_gcm = (ivsize == 12);
	unsigned int last;

	init_aead_job(req, edesc, all_contig, encrypt);

	/* BUG This should not be specific to generic GCM. */
	last = 0;
	if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen))
		last = FIFOLD_TYPE_LAST1;

	/* Read GCM IV */
	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
	/* Append Salt */
	if (!generic_gcm)
		append_data(desc, ctx->key + ctx->enckeylen, 4);
	/* Append IV */
	append_data(desc, req->iv, ivsize);
	/* End of blank commands */
}

static void init_authenc_job(struct aead_request *req,
			     struct aead_edesc *edesc,
			     bool all_contig, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 struct caam_aead_alg, aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	u32 *desc = edesc->hw_desc;
	u32 ivoffset = 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ivoffset = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686)
		ivoffset = 16 + CTR_RFC3686_NONCE_SIZE;

	init_aead_job(req, edesc, all_contig, encrypt);

	if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
		append_load_as_imm(desc, req->iv, ivsize,
				   LDST_CLASS_1_CCB |
				   LDST_SRCDST_BYTE_CONTEXT |
				   (ivoffset << LDST_OFFSET_SHIFT));
}
/*
 * Fill in ablkcipher job descriptor
 */
static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
				struct ablkcipher_edesc *edesc,
				struct ablkcipher_request *req,
				bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options = 0, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	printk(KERN_ERR "asked=%d, nbytes%d\n",
	       (int)edesc->src_nents ? 100 : req->nbytes, req->nbytes);
	dbg_dump_sg(KERN_ERR, "src    @"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (iv_contig) {
		src_dma = edesc->iv_dma;
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents + 1;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
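
	/*
	 * When the IV is not contiguous, ablkcipher_edesc_alloc() places it
	 * in sec4_sg[0] with the source entries following, so the in-place
	 * destination S/G view starts one entry past the IV.
	 */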
	if (likely(req->src == req->dst)) {
		if (!edesc->src_nents && iv_contig) {
			dst_dma = sg_dma_address(req->src);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	} else {
		if (!edesc->dst_nents) {
			dst_dma = sg_dma_address(req->dst);
		} else {
			dst_dma = edesc->sec4_sg_dma +
				sec4_sg_index * sizeof(struct sec4_sg_entry);
			out_options = LDST_SGF;
		}
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
 * Fill in ablkcipher givencrypt job descriptor
 */
static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
				    struct ablkcipher_edesc *edesc,
				    struct ablkcipher_request *req,
				    bool iv_contig)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	u32 *desc = edesc->hw_desc;
	u32 out_options, in_options;
	dma_addr_t dst_dma, src_dma;
	int len, sec4_sg_index = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
		       ivsize, 1);
	dbg_dump_sg(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    edesc->src_nents ? 100 : req->nbytes, 1, may_sleep);
#endif

	len = desc_len(sh_desc);
	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);

	if (!edesc->src_nents) {
		src_dma = sg_dma_address(req->src);
		in_options = 0;
	} else {
		src_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
		in_options = LDST_SGF;
	}
	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);

	if (iv_contig) {
		dst_dma = edesc->iv_dma;
		out_options = 0;
	} else {
		dst_dma = edesc->sec4_sg_dma +
			  sec4_sg_index * sizeof(struct sec4_sg_entry);
		out_options = LDST_SGF;
	}
	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
}
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   int desc_bytes, bool *all_contig_ptr,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0;
	struct aead_edesc *edesc;
	int sgc;
	bool all_contig = true;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	unsigned int authsize = ctx->authsize;

	if (unlikely(req->dst != req->src)) {
		src_nents = sg_count(req->src, req->assoclen + req->cryptlen);
		dst_nents = sg_count(req->dst,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : (-authsize)));
	} else {
		src_nents = sg_count(req->src,
				     req->assoclen + req->cryptlen +
					(encrypt ? authsize : 0));
	}

	/* Check if data are contiguous. */
	all_contig = !src_nents;
	if (!all_contig) {
		src_nents = src_nents ? : 1;
		sec4_sg_len = src_nents;
	}

	sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map source\n");
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}

		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
		if (unlikely(!sgc)) {
			dev_err(jrdev, "unable to map destination\n");
			dma_unmap_sg(jrdev, req->src, src_nents ? : 1,
				     DMA_TO_DEVICE);
			kfree(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
			 desc_bytes;
	*all_contig_ptr = all_contig;

	sec4_sg_index = 0;
	if (!all_contig) {
		sg_to_sec4_sg_last(req->src, src_nents,
			      edesc->sec4_sg + sec4_sg_index, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);
	}
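
	/* All data contiguous: no S/G table is needed, so skip mapping one. */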
	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;
}
static int gcm_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_gcm_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_encrypt(req);
}

static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_authenc_job(req, edesc, all_contig, true);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
static int gcm_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_gcm_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_decrypt(req);
}

static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *jrdev = ctx->jrdev;
	bool all_contig;
	u32 *desc;
	int ret = 0;

#ifdef DEBUG
	bool may_sleep = ((req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					      CRYPTO_TFM_REQ_MAY_SLEEP)) != 0);
	dbg_dump_sg(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
		    DUMP_PREFIX_ADDRESS, 16, 4, req->src,
		    req->assoclen + req->cryptlen, 1, may_sleep);
#endif

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
				 &all_contig, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_authenc_job(req, edesc, all_contig, false);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
/*
 * allocate and map the ablkcipher extended descriptor for ablkcipher
 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_BIDIRECTIONAL);
	} else {
		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
				 DMA_TO_DEVICE);
		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
				 DMA_FROM_DEVICE);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
			edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_enc,
		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor*/
	init_ablkcipher_job(ctx->sh_desc_dec,
		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
  2329. /*
  2330. * allocate and map the ablkcipher extended descriptor
  2331. * for ablkcipher givencrypt
  2332. */
  2333. static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
  2334. struct skcipher_givcrypt_request *greq,
  2335. int desc_bytes,
  2336. bool *iv_contig_out)
  2337. {
  2338. struct ablkcipher_request *req = &greq->creq;
  2339. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2340. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2341. struct device *jrdev = ctx->jrdev;
  2342. gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
  2343. CRYPTO_TFM_REQ_MAY_SLEEP)) ?
  2344. GFP_KERNEL : GFP_ATOMIC;
  2345. int src_nents, dst_nents = 0, sec4_sg_bytes;
  2346. struct ablkcipher_edesc *edesc;
  2347. dma_addr_t iv_dma = 0;
  2348. bool iv_contig = false;
  2349. int sgc;
  2350. int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
  2351. int sec4_sg_index;
  2352. src_nents = sg_count(req->src, req->nbytes);
  2353. if (unlikely(req->dst != req->src))
  2354. dst_nents = sg_count(req->dst, req->nbytes);
  2355. if (likely(req->src == req->dst)) {
  2356. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2357. DMA_BIDIRECTIONAL);
  2358. } else {
  2359. sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
  2360. DMA_TO_DEVICE);
  2361. sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
  2362. DMA_FROM_DEVICE);
  2363. }
  2364. /*
  2365. * Check if iv can be contiguous with source and destination.
  2366. * If so, include it. If not, create scatterlist.
  2367. */
  2368. iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
  2369. if (dma_mapping_error(jrdev, iv_dma)) {
  2370. dev_err(jrdev, "unable to map IV\n");
  2371. return ERR_PTR(-ENOMEM);
  2372. }
  2373. if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
  2374. iv_contig = true;
  2375. else
  2376. dst_nents = dst_nents ? : 1;
  2377. sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
  2378. sizeof(struct sec4_sg_entry);
  2379. /* allocate space for base edesc and hw desc commands, link tables */
  2380. edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
  2381. GFP_DMA | flags);
  2382. if (!edesc) {
  2383. dev_err(jrdev, "could not allocate extended descriptor\n");
  2384. return ERR_PTR(-ENOMEM);
  2385. }
  2386. edesc->src_nents = src_nents;
  2387. edesc->dst_nents = dst_nents;
  2388. edesc->sec4_sg_bytes = sec4_sg_bytes;
  2389. edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
  2390. desc_bytes;
  2391. sec4_sg_index = 0;
  2392. if (src_nents) {
  2393. sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
  2394. sec4_sg_index += src_nents;
  2395. }
  2396. if (!iv_contig) {
  2397. dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
  2398. iv_dma, ivsize, 0);
  2399. sec4_sg_index += 1;
  2400. sg_to_sec4_sg_last(req->dst, dst_nents,
  2401. edesc->sec4_sg + sec4_sg_index, 0);
  2402. }
  2403. edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
  2404. sec4_sg_bytes, DMA_TO_DEVICE);
2405. if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
2406. dev_err(jrdev, "unable to map S/G table\n");
kfree(edesc);
2407. return ERR_PTR(-ENOMEM);
2408. }
  2409. edesc->iv_dma = iv_dma;
  2410. #ifdef DEBUG
  2411. print_hex_dump(KERN_ERR,
  2412. "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
  2413. DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
  2414. sec4_sg_bytes, 1);
  2415. #endif
  2416. *iv_contig_out = iv_contig;
  2417. return edesc;
  2418. }
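/*
 * IV-generating encrypt entry point: allocate the extended descriptor via the
 * givencrypt path above, build a job from the givencrypt shared descriptor and
 * enqueue it on the job ring. Returns -EINPROGRESS on successful submission;
 * on enqueue failure the DMA mappings are undone and the edesc is freed.
 */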
  2419. static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
  2420. {
  2421. struct ablkcipher_request *req = &creq->creq;
  2422. struct ablkcipher_edesc *edesc;
  2423. struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
  2424. struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
  2425. struct device *jrdev = ctx->jrdev;
  2426. bool iv_contig;
  2427. u32 *desc;
  2428. int ret = 0;
  2429. /* allocate extended descriptor */
  2430. edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
  2431. CAAM_CMD_SZ, &iv_contig);
  2432. if (IS_ERR(edesc))
  2433. return PTR_ERR(edesc);
2434. /* Create and submit job descriptor */
  2435. init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
  2436. edesc, req, iv_contig);
  2437. #ifdef DEBUG
  2438. print_hex_dump(KERN_ERR,
  2439. "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
  2440. DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
  2441. desc_bytes(edesc->hw_desc), 1);
  2442. #endif
  2443. desc = edesc->hw_desc;
  2444. ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
  2445. if (!ret) {
  2446. ret = -EINPROGRESS;
  2447. } else {
  2448. ablkcipher_unmap(jrdev, edesc, req);
  2449. kfree(edesc);
  2450. }
  2451. return ret;
  2452. }
  2453. #define template_aead template_u.aead
  2454. #define template_ablkcipher template_u.ablkcipher
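/*
 * Template describing one (giv)ablkcipher algorithm: crypto API names, block
 * size, algorithm type and the CAAM class 1/2 operation values used when the
 * shared descriptors are built. Each template is turned into a caam_crypto_alg
 * by caam_alg_alloc() at module init.
 */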
  2455. struct caam_alg_template {
  2456. char name[CRYPTO_MAX_ALG_NAME];
  2457. char driver_name[CRYPTO_MAX_ALG_NAME];
  2458. unsigned int blocksize;
  2459. u32 type;
  2460. union {
  2461. struct ablkcipher_alg ablkcipher;
  2462. } template_u;
  2463. u32 class1_alg_type;
  2464. u32 class2_alg_type;
  2465. u32 alg_op;
  2466. };
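/*
 * Templates for the symmetric ciphers (CBC, CTR, RFC3686, XTS) exposed through
 * the ablkcipher/givcipher interface; caam_algapi_init() registers each one
 * with crypto_register_alg().
 */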
  2467. static struct caam_alg_template driver_algs[] = {
  2468. /* ablkcipher descriptor */
  2469. {
  2470. .name = "cbc(aes)",
  2471. .driver_name = "cbc-aes-caam",
  2472. .blocksize = AES_BLOCK_SIZE,
  2473. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2474. .template_ablkcipher = {
  2475. .setkey = ablkcipher_setkey,
  2476. .encrypt = ablkcipher_encrypt,
  2477. .decrypt = ablkcipher_decrypt,
  2478. .givencrypt = ablkcipher_givencrypt,
  2479. .geniv = "<built-in>",
  2480. .min_keysize = AES_MIN_KEY_SIZE,
  2481. .max_keysize = AES_MAX_KEY_SIZE,
  2482. .ivsize = AES_BLOCK_SIZE,
  2483. },
  2484. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2485. },
  2486. {
  2487. .name = "cbc(des3_ede)",
  2488. .driver_name = "cbc-3des-caam",
  2489. .blocksize = DES3_EDE_BLOCK_SIZE,
  2490. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2491. .template_ablkcipher = {
  2492. .setkey = ablkcipher_setkey,
  2493. .encrypt = ablkcipher_encrypt,
  2494. .decrypt = ablkcipher_decrypt,
  2495. .givencrypt = ablkcipher_givencrypt,
  2496. .geniv = "<built-in>",
  2497. .min_keysize = DES3_EDE_KEY_SIZE,
  2498. .max_keysize = DES3_EDE_KEY_SIZE,
  2499. .ivsize = DES3_EDE_BLOCK_SIZE,
  2500. },
  2501. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  2502. },
  2503. {
  2504. .name = "cbc(des)",
  2505. .driver_name = "cbc-des-caam",
  2506. .blocksize = DES_BLOCK_SIZE,
  2507. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2508. .template_ablkcipher = {
  2509. .setkey = ablkcipher_setkey,
  2510. .encrypt = ablkcipher_encrypt,
  2511. .decrypt = ablkcipher_decrypt,
  2512. .givencrypt = ablkcipher_givencrypt,
  2513. .geniv = "<built-in>",
  2514. .min_keysize = DES_KEY_SIZE,
  2515. .max_keysize = DES_KEY_SIZE,
  2516. .ivsize = DES_BLOCK_SIZE,
  2517. },
  2518. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  2519. },
  2520. {
  2521. .name = "ctr(aes)",
  2522. .driver_name = "ctr-aes-caam",
  2523. .blocksize = 1,
  2524. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2525. .template_ablkcipher = {
  2526. .setkey = ablkcipher_setkey,
  2527. .encrypt = ablkcipher_encrypt,
  2528. .decrypt = ablkcipher_decrypt,
  2529. .geniv = "chainiv",
  2530. .min_keysize = AES_MIN_KEY_SIZE,
  2531. .max_keysize = AES_MAX_KEY_SIZE,
  2532. .ivsize = AES_BLOCK_SIZE,
  2533. },
  2534. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  2535. },
  2536. {
  2537. .name = "rfc3686(ctr(aes))",
  2538. .driver_name = "rfc3686-ctr-aes-caam",
  2539. .blocksize = 1,
  2540. .type = CRYPTO_ALG_TYPE_GIVCIPHER,
  2541. .template_ablkcipher = {
  2542. .setkey = ablkcipher_setkey,
  2543. .encrypt = ablkcipher_encrypt,
  2544. .decrypt = ablkcipher_decrypt,
  2545. .givencrypt = ablkcipher_givencrypt,
  2546. .geniv = "<built-in>",
  2547. .min_keysize = AES_MIN_KEY_SIZE +
  2548. CTR_RFC3686_NONCE_SIZE,
  2549. .max_keysize = AES_MAX_KEY_SIZE +
  2550. CTR_RFC3686_NONCE_SIZE,
  2551. .ivsize = CTR_RFC3686_IV_SIZE,
  2552. },
  2553. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
  2554. },
  2555. {
  2556. .name = "xts(aes)",
  2557. .driver_name = "xts-aes-caam",
  2558. .blocksize = AES_BLOCK_SIZE,
  2559. .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
  2560. .template_ablkcipher = {
  2561. .setkey = xts_ablkcipher_setkey,
  2562. .encrypt = ablkcipher_encrypt,
  2563. .decrypt = ablkcipher_decrypt,
  2564. .geniv = "eseqiv",
  2565. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  2566. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  2567. .ivsize = AES_BLOCK_SIZE,
  2568. },
  2569. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
  2570. },
  2571. };
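/*
 * Table of AEAD algorithms: GCM variants (gcm, rfc4106, rfc4543) and authenc()
 * pairings of an HMAC with cipher_null, CBC (AES/DES/3DES) or RFC3686 CTR-AES.
 * The .caam member carries the CAAM-specific operation types; .rfc3686 selects
 * CTR mode with the RFC3686 nonce and .geniv marks the IV-generating variants.
 * Entries are registered with crypto_register_aead() in caam_algapi_init().
 */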
  2572. static struct caam_aead_alg driver_aeads[] = {
  2573. {
  2574. .aead = {
  2575. .base = {
  2576. .cra_name = "rfc4106(gcm(aes))",
  2577. .cra_driver_name = "rfc4106-gcm-aes-caam",
  2578. .cra_blocksize = 1,
  2579. },
  2580. .setkey = rfc4106_setkey,
  2581. .setauthsize = rfc4106_setauthsize,
  2582. .encrypt = ipsec_gcm_encrypt,
  2583. .decrypt = ipsec_gcm_decrypt,
  2584. .ivsize = 8,
  2585. .maxauthsize = AES_BLOCK_SIZE,
  2586. },
  2587. .caam = {
  2588. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2589. },
  2590. },
  2591. {
  2592. .aead = {
  2593. .base = {
  2594. .cra_name = "rfc4543(gcm(aes))",
  2595. .cra_driver_name = "rfc4543-gcm-aes-caam",
  2596. .cra_blocksize = 1,
  2597. },
  2598. .setkey = rfc4543_setkey,
  2599. .setauthsize = rfc4543_setauthsize,
  2600. .encrypt = ipsec_gcm_encrypt,
  2601. .decrypt = ipsec_gcm_decrypt,
  2602. .ivsize = 8,
  2603. .maxauthsize = AES_BLOCK_SIZE,
  2604. },
  2605. .caam = {
  2606. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2607. },
  2608. },
  2609. /* Galois Counter Mode */
  2610. {
  2611. .aead = {
  2612. .base = {
  2613. .cra_name = "gcm(aes)",
  2614. .cra_driver_name = "gcm-aes-caam",
  2615. .cra_blocksize = 1,
  2616. },
  2617. .setkey = gcm_setkey,
  2618. .setauthsize = gcm_setauthsize,
  2619. .encrypt = gcm_encrypt,
  2620. .decrypt = gcm_decrypt,
  2621. .ivsize = 12,
  2622. .maxauthsize = AES_BLOCK_SIZE,
  2623. },
  2624. .caam = {
  2625. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
  2626. },
  2627. },
  2628. /* single-pass ipsec_esp descriptor */
  2629. {
  2630. .aead = {
  2631. .base = {
  2632. .cra_name = "authenc(hmac(md5),"
  2633. "ecb(cipher_null))",
  2634. .cra_driver_name = "authenc-hmac-md5-"
  2635. "ecb-cipher_null-caam",
  2636. .cra_blocksize = NULL_BLOCK_SIZE,
  2637. },
  2638. .setkey = aead_setkey,
  2639. .setauthsize = aead_setauthsize,
  2640. .encrypt = aead_encrypt,
  2641. .decrypt = aead_decrypt,
  2642. .ivsize = NULL_IV_SIZE,
  2643. .maxauthsize = MD5_DIGEST_SIZE,
  2644. },
  2645. .caam = {
  2646. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2647. OP_ALG_AAI_HMAC_PRECOMP,
  2648. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2649. },
  2650. },
  2651. {
  2652. .aead = {
  2653. .base = {
  2654. .cra_name = "authenc(hmac(sha1),"
  2655. "ecb(cipher_null))",
  2656. .cra_driver_name = "authenc-hmac-sha1-"
  2657. "ecb-cipher_null-caam",
  2658. .cra_blocksize = NULL_BLOCK_SIZE,
  2659. },
  2660. .setkey = aead_setkey,
  2661. .setauthsize = aead_setauthsize,
  2662. .encrypt = aead_encrypt,
  2663. .decrypt = aead_decrypt,
  2664. .ivsize = NULL_IV_SIZE,
  2665. .maxauthsize = SHA1_DIGEST_SIZE,
  2666. },
  2667. .caam = {
  2668. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2669. OP_ALG_AAI_HMAC_PRECOMP,
  2670. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2671. },
  2672. },
  2673. {
  2674. .aead = {
  2675. .base = {
  2676. .cra_name = "authenc(hmac(sha224),"
  2677. "ecb(cipher_null))",
  2678. .cra_driver_name = "authenc-hmac-sha224-"
  2679. "ecb-cipher_null-caam",
  2680. .cra_blocksize = NULL_BLOCK_SIZE,
  2681. },
  2682. .setkey = aead_setkey,
  2683. .setauthsize = aead_setauthsize,
  2684. .encrypt = aead_encrypt,
  2685. .decrypt = aead_decrypt,
  2686. .ivsize = NULL_IV_SIZE,
  2687. .maxauthsize = SHA224_DIGEST_SIZE,
  2688. },
  2689. .caam = {
  2690. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2691. OP_ALG_AAI_HMAC_PRECOMP,
  2692. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2693. },
  2694. },
  2695. {
  2696. .aead = {
  2697. .base = {
  2698. .cra_name = "authenc(hmac(sha256),"
  2699. "ecb(cipher_null))",
  2700. .cra_driver_name = "authenc-hmac-sha256-"
  2701. "ecb-cipher_null-caam",
  2702. .cra_blocksize = NULL_BLOCK_SIZE,
  2703. },
  2704. .setkey = aead_setkey,
  2705. .setauthsize = aead_setauthsize,
  2706. .encrypt = aead_encrypt,
  2707. .decrypt = aead_decrypt,
  2708. .ivsize = NULL_IV_SIZE,
  2709. .maxauthsize = SHA256_DIGEST_SIZE,
  2710. },
  2711. .caam = {
  2712. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2713. OP_ALG_AAI_HMAC_PRECOMP,
  2714. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2715. },
  2716. },
  2717. {
  2718. .aead = {
  2719. .base = {
  2720. .cra_name = "authenc(hmac(sha384),"
  2721. "ecb(cipher_null))",
  2722. .cra_driver_name = "authenc-hmac-sha384-"
  2723. "ecb-cipher_null-caam",
  2724. .cra_blocksize = NULL_BLOCK_SIZE,
  2725. },
  2726. .setkey = aead_setkey,
  2727. .setauthsize = aead_setauthsize,
  2728. .encrypt = aead_encrypt,
  2729. .decrypt = aead_decrypt,
  2730. .ivsize = NULL_IV_SIZE,
  2731. .maxauthsize = SHA384_DIGEST_SIZE,
  2732. },
  2733. .caam = {
  2734. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2735. OP_ALG_AAI_HMAC_PRECOMP,
  2736. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2737. },
  2738. },
  2739. {
  2740. .aead = {
  2741. .base = {
  2742. .cra_name = "authenc(hmac(sha512),"
  2743. "ecb(cipher_null))",
  2744. .cra_driver_name = "authenc-hmac-sha512-"
  2745. "ecb-cipher_null-caam",
  2746. .cra_blocksize = NULL_BLOCK_SIZE,
  2747. },
  2748. .setkey = aead_setkey,
  2749. .setauthsize = aead_setauthsize,
  2750. .encrypt = aead_encrypt,
  2751. .decrypt = aead_decrypt,
  2752. .ivsize = NULL_IV_SIZE,
  2753. .maxauthsize = SHA512_DIGEST_SIZE,
  2754. },
  2755. .caam = {
  2756. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  2757. OP_ALG_AAI_HMAC_PRECOMP,
  2758. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  2759. },
  2760. },
  2761. {
  2762. .aead = {
  2763. .base = {
  2764. .cra_name = "authenc(hmac(md5),cbc(aes))",
  2765. .cra_driver_name = "authenc-hmac-md5-"
  2766. "cbc-aes-caam",
  2767. .cra_blocksize = AES_BLOCK_SIZE,
  2768. },
  2769. .setkey = aead_setkey,
  2770. .setauthsize = aead_setauthsize,
  2771. .encrypt = aead_encrypt,
  2772. .decrypt = aead_decrypt,
  2773. .ivsize = AES_BLOCK_SIZE,
  2774. .maxauthsize = MD5_DIGEST_SIZE,
  2775. },
  2776. .caam = {
  2777. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2778. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2779. OP_ALG_AAI_HMAC_PRECOMP,
  2780. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2781. },
  2782. },
  2783. {
  2784. .aead = {
  2785. .base = {
  2786. .cra_name = "echainiv(authenc(hmac(md5),"
  2787. "cbc(aes)))",
  2788. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  2789. "cbc-aes-caam",
  2790. .cra_blocksize = AES_BLOCK_SIZE,
  2791. },
  2792. .setkey = aead_setkey,
  2793. .setauthsize = aead_setauthsize,
  2794. .encrypt = aead_encrypt,
  2795. .decrypt = aead_decrypt,
  2796. .ivsize = AES_BLOCK_SIZE,
  2797. .maxauthsize = MD5_DIGEST_SIZE,
  2798. },
  2799. .caam = {
  2800. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2801. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  2802. OP_ALG_AAI_HMAC_PRECOMP,
  2803. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  2804. .geniv = true,
  2805. },
  2806. },
  2807. {
  2808. .aead = {
  2809. .base = {
  2810. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  2811. .cra_driver_name = "authenc-hmac-sha1-"
  2812. "cbc-aes-caam",
  2813. .cra_blocksize = AES_BLOCK_SIZE,
  2814. },
  2815. .setkey = aead_setkey,
  2816. .setauthsize = aead_setauthsize,
  2817. .encrypt = aead_encrypt,
  2818. .decrypt = aead_decrypt,
  2819. .ivsize = AES_BLOCK_SIZE,
  2820. .maxauthsize = SHA1_DIGEST_SIZE,
  2821. },
  2822. .caam = {
  2823. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2824. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2825. OP_ALG_AAI_HMAC_PRECOMP,
  2826. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2827. },
  2828. },
  2829. {
  2830. .aead = {
  2831. .base = {
  2832. .cra_name = "echainiv(authenc(hmac(sha1),"
  2833. "cbc(aes)))",
  2834. .cra_driver_name = "echainiv-authenc-"
  2835. "hmac-sha1-cbc-aes-caam",
  2836. .cra_blocksize = AES_BLOCK_SIZE,
  2837. },
  2838. .setkey = aead_setkey,
  2839. .setauthsize = aead_setauthsize,
  2840. .encrypt = aead_encrypt,
  2841. .decrypt = aead_decrypt,
  2842. .ivsize = AES_BLOCK_SIZE,
  2843. .maxauthsize = SHA1_DIGEST_SIZE,
  2844. },
  2845. .caam = {
  2846. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2847. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  2848. OP_ALG_AAI_HMAC_PRECOMP,
  2849. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  2850. .geniv = true,
  2851. },
  2852. },
  2853. {
  2854. .aead = {
  2855. .base = {
  2856. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  2857. .cra_driver_name = "authenc-hmac-sha224-"
  2858. "cbc-aes-caam",
  2859. .cra_blocksize = AES_BLOCK_SIZE,
  2860. },
  2861. .setkey = aead_setkey,
  2862. .setauthsize = aead_setauthsize,
  2863. .encrypt = aead_encrypt,
  2864. .decrypt = aead_decrypt,
  2865. .ivsize = AES_BLOCK_SIZE,
  2866. .maxauthsize = SHA224_DIGEST_SIZE,
  2867. },
  2868. .caam = {
  2869. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2870. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2871. OP_ALG_AAI_HMAC_PRECOMP,
  2872. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2873. },
  2874. },
  2875. {
  2876. .aead = {
  2877. .base = {
  2878. .cra_name = "echainiv(authenc(hmac(sha224),"
  2879. "cbc(aes)))",
  2880. .cra_driver_name = "echainiv-authenc-"
  2881. "hmac-sha224-cbc-aes-caam",
  2882. .cra_blocksize = AES_BLOCK_SIZE,
  2883. },
  2884. .setkey = aead_setkey,
  2885. .setauthsize = aead_setauthsize,
  2886. .encrypt = aead_encrypt,
  2887. .decrypt = aead_decrypt,
  2888. .ivsize = AES_BLOCK_SIZE,
  2889. .maxauthsize = SHA224_DIGEST_SIZE,
  2890. },
  2891. .caam = {
  2892. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2893. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  2894. OP_ALG_AAI_HMAC_PRECOMP,
  2895. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  2896. .geniv = true,
  2897. },
  2898. },
  2899. {
  2900. .aead = {
  2901. .base = {
  2902. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  2903. .cra_driver_name = "authenc-hmac-sha256-"
  2904. "cbc-aes-caam",
  2905. .cra_blocksize = AES_BLOCK_SIZE,
  2906. },
  2907. .setkey = aead_setkey,
  2908. .setauthsize = aead_setauthsize,
  2909. .encrypt = aead_encrypt,
  2910. .decrypt = aead_decrypt,
  2911. .ivsize = AES_BLOCK_SIZE,
  2912. .maxauthsize = SHA256_DIGEST_SIZE,
  2913. },
  2914. .caam = {
  2915. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2916. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2917. OP_ALG_AAI_HMAC_PRECOMP,
  2918. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2919. },
  2920. },
  2921. {
  2922. .aead = {
  2923. .base = {
  2924. .cra_name = "echainiv(authenc(hmac(sha256),"
  2925. "cbc(aes)))",
  2926. .cra_driver_name = "echainiv-authenc-"
  2927. "hmac-sha256-cbc-aes-caam",
  2928. .cra_blocksize = AES_BLOCK_SIZE,
  2929. },
  2930. .setkey = aead_setkey,
  2931. .setauthsize = aead_setauthsize,
  2932. .encrypt = aead_encrypt,
  2933. .decrypt = aead_decrypt,
  2934. .ivsize = AES_BLOCK_SIZE,
  2935. .maxauthsize = SHA256_DIGEST_SIZE,
  2936. },
  2937. .caam = {
  2938. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2939. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  2940. OP_ALG_AAI_HMAC_PRECOMP,
  2941. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  2942. .geniv = true,
  2943. },
  2944. },
  2945. {
  2946. .aead = {
  2947. .base = {
  2948. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  2949. .cra_driver_name = "authenc-hmac-sha384-"
  2950. "cbc-aes-caam",
  2951. .cra_blocksize = AES_BLOCK_SIZE,
  2952. },
  2953. .setkey = aead_setkey,
  2954. .setauthsize = aead_setauthsize,
  2955. .encrypt = aead_encrypt,
  2956. .decrypt = aead_decrypt,
  2957. .ivsize = AES_BLOCK_SIZE,
  2958. .maxauthsize = SHA384_DIGEST_SIZE,
  2959. },
  2960. .caam = {
  2961. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2962. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2963. OP_ALG_AAI_HMAC_PRECOMP,
  2964. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2965. },
  2966. },
  2967. {
  2968. .aead = {
  2969. .base = {
  2970. .cra_name = "echainiv(authenc(hmac(sha384),"
  2971. "cbc(aes)))",
  2972. .cra_driver_name = "echainiv-authenc-"
  2973. "hmac-sha384-cbc-aes-caam",
  2974. .cra_blocksize = AES_BLOCK_SIZE,
  2975. },
  2976. .setkey = aead_setkey,
  2977. .setauthsize = aead_setauthsize,
  2978. .encrypt = aead_encrypt,
  2979. .decrypt = aead_decrypt,
  2980. .ivsize = AES_BLOCK_SIZE,
  2981. .maxauthsize = SHA384_DIGEST_SIZE,
  2982. },
  2983. .caam = {
  2984. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  2985. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  2986. OP_ALG_AAI_HMAC_PRECOMP,
  2987. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  2988. .geniv = true,
  2989. },
  2990. },
  2991. {
  2992. .aead = {
  2993. .base = {
  2994. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  2995. .cra_driver_name = "authenc-hmac-sha512-"
  2996. "cbc-aes-caam",
  2997. .cra_blocksize = AES_BLOCK_SIZE,
  2998. },
  2999. .setkey = aead_setkey,
  3000. .setauthsize = aead_setauthsize,
  3001. .encrypt = aead_encrypt,
  3002. .decrypt = aead_decrypt,
  3003. .ivsize = AES_BLOCK_SIZE,
  3004. .maxauthsize = SHA512_DIGEST_SIZE,
  3005. },
  3006. .caam = {
  3007. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3008. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3009. OP_ALG_AAI_HMAC_PRECOMP,
  3010. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3011. },
  3012. },
  3013. {
  3014. .aead = {
  3015. .base = {
  3016. .cra_name = "echainiv(authenc(hmac(sha512),"
  3017. "cbc(aes)))",
  3018. .cra_driver_name = "echainiv-authenc-"
  3019. "hmac-sha512-cbc-aes-caam",
  3020. .cra_blocksize = AES_BLOCK_SIZE,
  3021. },
  3022. .setkey = aead_setkey,
  3023. .setauthsize = aead_setauthsize,
  3024. .encrypt = aead_encrypt,
  3025. .decrypt = aead_decrypt,
  3026. .ivsize = AES_BLOCK_SIZE,
  3027. .maxauthsize = SHA512_DIGEST_SIZE,
  3028. },
  3029. .caam = {
  3030. .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
  3031. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3032. OP_ALG_AAI_HMAC_PRECOMP,
  3033. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3034. .geniv = true,
  3035. },
  3036. },
  3037. {
  3038. .aead = {
  3039. .base = {
  3040. .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
  3041. .cra_driver_name = "authenc-hmac-md5-"
  3042. "cbc-des3_ede-caam",
  3043. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3044. },
  3045. .setkey = aead_setkey,
  3046. .setauthsize = aead_setauthsize,
  3047. .encrypt = aead_encrypt,
  3048. .decrypt = aead_decrypt,
  3049. .ivsize = DES3_EDE_BLOCK_SIZE,
  3050. .maxauthsize = MD5_DIGEST_SIZE,
  3051. },
  3052. .caam = {
  3053. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3054. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3055. OP_ALG_AAI_HMAC_PRECOMP,
  3056. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3057. }
  3058. },
  3059. {
  3060. .aead = {
  3061. .base = {
  3062. .cra_name = "echainiv(authenc(hmac(md5),"
  3063. "cbc(des3_ede)))",
  3064. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  3065. "cbc-des3_ede-caam",
  3066. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3067. },
  3068. .setkey = aead_setkey,
  3069. .setauthsize = aead_setauthsize,
  3070. .encrypt = aead_encrypt,
  3071. .decrypt = aead_decrypt,
  3072. .ivsize = DES3_EDE_BLOCK_SIZE,
  3073. .maxauthsize = MD5_DIGEST_SIZE,
  3074. },
  3075. .caam = {
  3076. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3077. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3078. OP_ALG_AAI_HMAC_PRECOMP,
  3079. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3080. .geniv = true,
  3081. }
  3082. },
  3083. {
  3084. .aead = {
  3085. .base = {
  3086. .cra_name = "authenc(hmac(sha1),"
  3087. "cbc(des3_ede))",
  3088. .cra_driver_name = "authenc-hmac-sha1-"
  3089. "cbc-des3_ede-caam",
  3090. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3091. },
  3092. .setkey = aead_setkey,
  3093. .setauthsize = aead_setauthsize,
  3094. .encrypt = aead_encrypt,
  3095. .decrypt = aead_decrypt,
  3096. .ivsize = DES3_EDE_BLOCK_SIZE,
  3097. .maxauthsize = SHA1_DIGEST_SIZE,
  3098. },
  3099. .caam = {
  3100. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3101. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3102. OP_ALG_AAI_HMAC_PRECOMP,
  3103. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3104. },
  3105. },
  3106. {
  3107. .aead = {
  3108. .base = {
  3109. .cra_name = "echainiv(authenc(hmac(sha1),"
  3110. "cbc(des3_ede)))",
  3111. .cra_driver_name = "echainiv-authenc-"
  3112. "hmac-sha1-"
  3113. "cbc-des3_ede-caam",
  3114. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3115. },
  3116. .setkey = aead_setkey,
  3117. .setauthsize = aead_setauthsize,
  3118. .encrypt = aead_encrypt,
  3119. .decrypt = aead_decrypt,
  3120. .ivsize = DES3_EDE_BLOCK_SIZE,
  3121. .maxauthsize = SHA1_DIGEST_SIZE,
  3122. },
  3123. .caam = {
  3124. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3125. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3126. OP_ALG_AAI_HMAC_PRECOMP,
  3127. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3128. .geniv = true,
  3129. },
  3130. },
  3131. {
  3132. .aead = {
  3133. .base = {
  3134. .cra_name = "authenc(hmac(sha224),"
  3135. "cbc(des3_ede))",
  3136. .cra_driver_name = "authenc-hmac-sha224-"
  3137. "cbc-des3_ede-caam",
  3138. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3139. },
  3140. .setkey = aead_setkey,
  3141. .setauthsize = aead_setauthsize,
  3142. .encrypt = aead_encrypt,
  3143. .decrypt = aead_decrypt,
  3144. .ivsize = DES3_EDE_BLOCK_SIZE,
  3145. .maxauthsize = SHA224_DIGEST_SIZE,
  3146. },
  3147. .caam = {
  3148. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3149. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3150. OP_ALG_AAI_HMAC_PRECOMP,
  3151. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3152. },
  3153. },
  3154. {
  3155. .aead = {
  3156. .base = {
  3157. .cra_name = "echainiv(authenc(hmac(sha224),"
  3158. "cbc(des3_ede)))",
  3159. .cra_driver_name = "echainiv-authenc-"
  3160. "hmac-sha224-"
  3161. "cbc-des3_ede-caam",
  3162. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3163. },
  3164. .setkey = aead_setkey,
  3165. .setauthsize = aead_setauthsize,
  3166. .encrypt = aead_encrypt,
  3167. .decrypt = aead_decrypt,
  3168. .ivsize = DES3_EDE_BLOCK_SIZE,
  3169. .maxauthsize = SHA224_DIGEST_SIZE,
  3170. },
  3171. .caam = {
  3172. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3173. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3174. OP_ALG_AAI_HMAC_PRECOMP,
  3175. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3176. .geniv = true,
  3177. },
  3178. },
  3179. {
  3180. .aead = {
  3181. .base = {
  3182. .cra_name = "authenc(hmac(sha256),"
  3183. "cbc(des3_ede))",
  3184. .cra_driver_name = "authenc-hmac-sha256-"
  3185. "cbc-des3_ede-caam",
  3186. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3187. },
  3188. .setkey = aead_setkey,
  3189. .setauthsize = aead_setauthsize,
  3190. .encrypt = aead_encrypt,
  3191. .decrypt = aead_decrypt,
  3192. .ivsize = DES3_EDE_BLOCK_SIZE,
  3193. .maxauthsize = SHA256_DIGEST_SIZE,
  3194. },
  3195. .caam = {
  3196. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3197. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3198. OP_ALG_AAI_HMAC_PRECOMP,
  3199. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3200. },
  3201. },
  3202. {
  3203. .aead = {
  3204. .base = {
  3205. .cra_name = "echainiv(authenc(hmac(sha256),"
  3206. "cbc(des3_ede)))",
  3207. .cra_driver_name = "echainiv-authenc-"
  3208. "hmac-sha256-"
  3209. "cbc-des3_ede-caam",
  3210. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3211. },
  3212. .setkey = aead_setkey,
  3213. .setauthsize = aead_setauthsize,
  3214. .encrypt = aead_encrypt,
  3215. .decrypt = aead_decrypt,
  3216. .ivsize = DES3_EDE_BLOCK_SIZE,
  3217. .maxauthsize = SHA256_DIGEST_SIZE,
  3218. },
  3219. .caam = {
  3220. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3221. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3222. OP_ALG_AAI_HMAC_PRECOMP,
  3223. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3224. .geniv = true,
  3225. },
  3226. },
  3227. {
  3228. .aead = {
  3229. .base = {
  3230. .cra_name = "authenc(hmac(sha384),"
  3231. "cbc(des3_ede))",
  3232. .cra_driver_name = "authenc-hmac-sha384-"
  3233. "cbc-des3_ede-caam",
  3234. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3235. },
  3236. .setkey = aead_setkey,
  3237. .setauthsize = aead_setauthsize,
  3238. .encrypt = aead_encrypt,
  3239. .decrypt = aead_decrypt,
  3240. .ivsize = DES3_EDE_BLOCK_SIZE,
  3241. .maxauthsize = SHA384_DIGEST_SIZE,
  3242. },
  3243. .caam = {
  3244. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3245. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3246. OP_ALG_AAI_HMAC_PRECOMP,
  3247. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3248. },
  3249. },
  3250. {
  3251. .aead = {
  3252. .base = {
  3253. .cra_name = "echainiv(authenc(hmac(sha384),"
  3254. "cbc(des3_ede)))",
  3255. .cra_driver_name = "echainiv-authenc-"
  3256. "hmac-sha384-"
  3257. "cbc-des3_ede-caam",
  3258. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3259. },
  3260. .setkey = aead_setkey,
  3261. .setauthsize = aead_setauthsize,
  3262. .encrypt = aead_encrypt,
  3263. .decrypt = aead_decrypt,
  3264. .ivsize = DES3_EDE_BLOCK_SIZE,
  3265. .maxauthsize = SHA384_DIGEST_SIZE,
  3266. },
  3267. .caam = {
  3268. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3269. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3270. OP_ALG_AAI_HMAC_PRECOMP,
  3271. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3272. .geniv = true,
  3273. },
  3274. },
  3275. {
  3276. .aead = {
  3277. .base = {
  3278. .cra_name = "authenc(hmac(sha512),"
  3279. "cbc(des3_ede))",
  3280. .cra_driver_name = "authenc-hmac-sha512-"
  3281. "cbc-des3_ede-caam",
  3282. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3283. },
  3284. .setkey = aead_setkey,
  3285. .setauthsize = aead_setauthsize,
  3286. .encrypt = aead_encrypt,
  3287. .decrypt = aead_decrypt,
  3288. .ivsize = DES3_EDE_BLOCK_SIZE,
  3289. .maxauthsize = SHA512_DIGEST_SIZE,
  3290. },
  3291. .caam = {
  3292. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3293. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3294. OP_ALG_AAI_HMAC_PRECOMP,
  3295. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3296. },
  3297. },
  3298. {
  3299. .aead = {
  3300. .base = {
  3301. .cra_name = "echainiv(authenc(hmac(sha512),"
  3302. "cbc(des3_ede)))",
  3303. .cra_driver_name = "echainiv-authenc-"
  3304. "hmac-sha512-"
  3305. "cbc-des3_ede-caam",
  3306. .cra_blocksize = DES3_EDE_BLOCK_SIZE,
  3307. },
  3308. .setkey = aead_setkey,
  3309. .setauthsize = aead_setauthsize,
  3310. .encrypt = aead_encrypt,
  3311. .decrypt = aead_decrypt,
  3312. .ivsize = DES3_EDE_BLOCK_SIZE,
  3313. .maxauthsize = SHA512_DIGEST_SIZE,
  3314. },
  3315. .caam = {
  3316. .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
  3317. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3318. OP_ALG_AAI_HMAC_PRECOMP,
  3319. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3320. .geniv = true,
  3321. },
  3322. },
  3323. {
  3324. .aead = {
  3325. .base = {
  3326. .cra_name = "authenc(hmac(md5),cbc(des))",
  3327. .cra_driver_name = "authenc-hmac-md5-"
  3328. "cbc-des-caam",
  3329. .cra_blocksize = DES_BLOCK_SIZE,
  3330. },
  3331. .setkey = aead_setkey,
  3332. .setauthsize = aead_setauthsize,
  3333. .encrypt = aead_encrypt,
  3334. .decrypt = aead_decrypt,
  3335. .ivsize = DES_BLOCK_SIZE,
  3336. .maxauthsize = MD5_DIGEST_SIZE,
  3337. },
  3338. .caam = {
  3339. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3340. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3341. OP_ALG_AAI_HMAC_PRECOMP,
  3342. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3343. },
  3344. },
  3345. {
  3346. .aead = {
  3347. .base = {
  3348. .cra_name = "echainiv(authenc(hmac(md5),"
  3349. "cbc(des)))",
  3350. .cra_driver_name = "echainiv-authenc-hmac-md5-"
  3351. "cbc-des-caam",
  3352. .cra_blocksize = DES_BLOCK_SIZE,
  3353. },
  3354. .setkey = aead_setkey,
  3355. .setauthsize = aead_setauthsize,
  3356. .encrypt = aead_encrypt,
  3357. .decrypt = aead_decrypt,
  3358. .ivsize = DES_BLOCK_SIZE,
  3359. .maxauthsize = MD5_DIGEST_SIZE,
  3360. },
  3361. .caam = {
  3362. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3363. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3364. OP_ALG_AAI_HMAC_PRECOMP,
  3365. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3366. .geniv = true,
  3367. },
  3368. },
  3369. {
  3370. .aead = {
  3371. .base = {
  3372. .cra_name = "authenc(hmac(sha1),cbc(des))",
  3373. .cra_driver_name = "authenc-hmac-sha1-"
  3374. "cbc-des-caam",
  3375. .cra_blocksize = DES_BLOCK_SIZE,
  3376. },
  3377. .setkey = aead_setkey,
  3378. .setauthsize = aead_setauthsize,
  3379. .encrypt = aead_encrypt,
  3380. .decrypt = aead_decrypt,
  3381. .ivsize = DES_BLOCK_SIZE,
  3382. .maxauthsize = SHA1_DIGEST_SIZE,
  3383. },
  3384. .caam = {
  3385. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3386. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3387. OP_ALG_AAI_HMAC_PRECOMP,
  3388. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3389. },
  3390. },
  3391. {
  3392. .aead = {
  3393. .base = {
  3394. .cra_name = "echainiv(authenc(hmac(sha1),"
  3395. "cbc(des)))",
  3396. .cra_driver_name = "echainiv-authenc-"
  3397. "hmac-sha1-cbc-des-caam",
  3398. .cra_blocksize = DES_BLOCK_SIZE,
  3399. },
  3400. .setkey = aead_setkey,
  3401. .setauthsize = aead_setauthsize,
  3402. .encrypt = aead_encrypt,
  3403. .decrypt = aead_decrypt,
  3404. .ivsize = DES_BLOCK_SIZE,
  3405. .maxauthsize = SHA1_DIGEST_SIZE,
  3406. },
  3407. .caam = {
  3408. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3409. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3410. OP_ALG_AAI_HMAC_PRECOMP,
  3411. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3412. .geniv = true,
  3413. },
  3414. },
  3415. {
  3416. .aead = {
  3417. .base = {
  3418. .cra_name = "authenc(hmac(sha224),cbc(des))",
  3419. .cra_driver_name = "authenc-hmac-sha224-"
  3420. "cbc-des-caam",
  3421. .cra_blocksize = DES_BLOCK_SIZE,
  3422. },
  3423. .setkey = aead_setkey,
  3424. .setauthsize = aead_setauthsize,
  3425. .encrypt = aead_encrypt,
  3426. .decrypt = aead_decrypt,
  3427. .ivsize = DES_BLOCK_SIZE,
  3428. .maxauthsize = SHA224_DIGEST_SIZE,
  3429. },
  3430. .caam = {
  3431. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3432. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3433. OP_ALG_AAI_HMAC_PRECOMP,
  3434. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3435. },
  3436. },
  3437. {
  3438. .aead = {
  3439. .base = {
  3440. .cra_name = "echainiv(authenc(hmac(sha224),"
  3441. "cbc(des)))",
  3442. .cra_driver_name = "echainiv-authenc-"
  3443. "hmac-sha224-cbc-des-caam",
  3444. .cra_blocksize = DES_BLOCK_SIZE,
  3445. },
  3446. .setkey = aead_setkey,
  3447. .setauthsize = aead_setauthsize,
  3448. .encrypt = aead_encrypt,
  3449. .decrypt = aead_decrypt,
  3450. .ivsize = DES_BLOCK_SIZE,
  3451. .maxauthsize = SHA224_DIGEST_SIZE,
  3452. },
  3453. .caam = {
  3454. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3455. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3456. OP_ALG_AAI_HMAC_PRECOMP,
  3457. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3458. .geniv = true,
  3459. },
  3460. },
  3461. {
  3462. .aead = {
  3463. .base = {
  3464. .cra_name = "authenc(hmac(sha256),cbc(des))",
  3465. .cra_driver_name = "authenc-hmac-sha256-"
  3466. "cbc-des-caam",
  3467. .cra_blocksize = DES_BLOCK_SIZE,
  3468. },
  3469. .setkey = aead_setkey,
  3470. .setauthsize = aead_setauthsize,
  3471. .encrypt = aead_encrypt,
  3472. .decrypt = aead_decrypt,
  3473. .ivsize = DES_BLOCK_SIZE,
  3474. .maxauthsize = SHA256_DIGEST_SIZE,
  3475. },
  3476. .caam = {
  3477. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3478. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3479. OP_ALG_AAI_HMAC_PRECOMP,
  3480. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3481. },
  3482. },
  3483. {
  3484. .aead = {
  3485. .base = {
  3486. .cra_name = "echainiv(authenc(hmac(sha256),"
  3487. "cbc(des)))",
  3488. .cra_driver_name = "echainiv-authenc-"
  3489. "hmac-sha256-cbc-des-caam",
  3490. .cra_blocksize = DES_BLOCK_SIZE,
  3491. },
  3492. .setkey = aead_setkey,
  3493. .setauthsize = aead_setauthsize,
  3494. .encrypt = aead_encrypt,
  3495. .decrypt = aead_decrypt,
  3496. .ivsize = DES_BLOCK_SIZE,
  3497. .maxauthsize = SHA256_DIGEST_SIZE,
  3498. },
  3499. .caam = {
  3500. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3501. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3502. OP_ALG_AAI_HMAC_PRECOMP,
  3503. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3504. .geniv = true,
  3505. },
  3506. },
  3507. {
  3508. .aead = {
  3509. .base = {
  3510. .cra_name = "authenc(hmac(sha384),cbc(des))",
  3511. .cra_driver_name = "authenc-hmac-sha384-"
  3512. "cbc-des-caam",
  3513. .cra_blocksize = DES_BLOCK_SIZE,
  3514. },
  3515. .setkey = aead_setkey,
  3516. .setauthsize = aead_setauthsize,
  3517. .encrypt = aead_encrypt,
  3518. .decrypt = aead_decrypt,
  3519. .ivsize = DES_BLOCK_SIZE,
  3520. .maxauthsize = SHA384_DIGEST_SIZE,
  3521. },
  3522. .caam = {
  3523. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3524. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3525. OP_ALG_AAI_HMAC_PRECOMP,
  3526. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3527. },
  3528. },
  3529. {
  3530. .aead = {
  3531. .base = {
  3532. .cra_name = "echainiv(authenc(hmac(sha384),"
  3533. "cbc(des)))",
  3534. .cra_driver_name = "echainiv-authenc-"
  3535. "hmac-sha384-cbc-des-caam",
  3536. .cra_blocksize = DES_BLOCK_SIZE,
  3537. },
  3538. .setkey = aead_setkey,
  3539. .setauthsize = aead_setauthsize,
  3540. .encrypt = aead_encrypt,
  3541. .decrypt = aead_decrypt,
  3542. .ivsize = DES_BLOCK_SIZE,
  3543. .maxauthsize = SHA384_DIGEST_SIZE,
  3544. },
  3545. .caam = {
  3546. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3547. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3548. OP_ALG_AAI_HMAC_PRECOMP,
  3549. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3550. .geniv = true,
  3551. },
  3552. },
  3553. {
  3554. .aead = {
  3555. .base = {
  3556. .cra_name = "authenc(hmac(sha512),cbc(des))",
  3557. .cra_driver_name = "authenc-hmac-sha512-"
  3558. "cbc-des-caam",
  3559. .cra_blocksize = DES_BLOCK_SIZE,
  3560. },
  3561. .setkey = aead_setkey,
  3562. .setauthsize = aead_setauthsize,
  3563. .encrypt = aead_encrypt,
  3564. .decrypt = aead_decrypt,
  3565. .ivsize = DES_BLOCK_SIZE,
  3566. .maxauthsize = SHA512_DIGEST_SIZE,
  3567. },
  3568. .caam = {
  3569. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3570. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3571. OP_ALG_AAI_HMAC_PRECOMP,
  3572. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3573. },
  3574. },
  3575. {
  3576. .aead = {
  3577. .base = {
  3578. .cra_name = "echainiv(authenc(hmac(sha512),"
  3579. "cbc(des)))",
  3580. .cra_driver_name = "echainiv-authenc-"
  3581. "hmac-sha512-cbc-des-caam",
  3582. .cra_blocksize = DES_BLOCK_SIZE,
  3583. },
  3584. .setkey = aead_setkey,
  3585. .setauthsize = aead_setauthsize,
  3586. .encrypt = aead_encrypt,
  3587. .decrypt = aead_decrypt,
  3588. .ivsize = DES_BLOCK_SIZE,
  3589. .maxauthsize = SHA512_DIGEST_SIZE,
  3590. },
  3591. .caam = {
  3592. .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
  3593. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3594. OP_ALG_AAI_HMAC_PRECOMP,
  3595. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3596. .geniv = true,
  3597. },
  3598. },
  3599. {
  3600. .aead = {
  3601. .base = {
  3602. .cra_name = "authenc(hmac(md5),"
  3603. "rfc3686(ctr(aes)))",
  3604. .cra_driver_name = "authenc-hmac-md5-"
  3605. "rfc3686-ctr-aes-caam",
  3606. .cra_blocksize = 1,
  3607. },
  3608. .setkey = aead_setkey,
  3609. .setauthsize = aead_setauthsize,
  3610. .encrypt = aead_encrypt,
  3611. .decrypt = aead_decrypt,
  3612. .ivsize = CTR_RFC3686_IV_SIZE,
  3613. .maxauthsize = MD5_DIGEST_SIZE,
  3614. },
  3615. .caam = {
  3616. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3617. OP_ALG_AAI_CTR_MOD128,
  3618. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3619. OP_ALG_AAI_HMAC_PRECOMP,
  3620. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3621. .rfc3686 = true,
  3622. },
  3623. },
  3624. {
  3625. .aead = {
  3626. .base = {
  3627. .cra_name = "seqiv(authenc("
  3628. "hmac(md5),rfc3686(ctr(aes))))",
  3629. .cra_driver_name = "seqiv-authenc-hmac-md5-"
  3630. "rfc3686-ctr-aes-caam",
  3631. .cra_blocksize = 1,
  3632. },
  3633. .setkey = aead_setkey,
  3634. .setauthsize = aead_setauthsize,
  3635. .encrypt = aead_encrypt,
  3636. .decrypt = aead_decrypt,
  3637. .ivsize = CTR_RFC3686_IV_SIZE,
  3638. .maxauthsize = MD5_DIGEST_SIZE,
  3639. },
  3640. .caam = {
  3641. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3642. OP_ALG_AAI_CTR_MOD128,
  3643. .class2_alg_type = OP_ALG_ALGSEL_MD5 |
  3644. OP_ALG_AAI_HMAC_PRECOMP,
  3645. .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
  3646. .rfc3686 = true,
  3647. .geniv = true,
  3648. },
  3649. },
  3650. {
  3651. .aead = {
  3652. .base = {
  3653. .cra_name = "authenc(hmac(sha1),"
  3654. "rfc3686(ctr(aes)))",
  3655. .cra_driver_name = "authenc-hmac-sha1-"
  3656. "rfc3686-ctr-aes-caam",
  3657. .cra_blocksize = 1,
  3658. },
  3659. .setkey = aead_setkey,
  3660. .setauthsize = aead_setauthsize,
  3661. .encrypt = aead_encrypt,
  3662. .decrypt = aead_decrypt,
  3663. .ivsize = CTR_RFC3686_IV_SIZE,
  3664. .maxauthsize = SHA1_DIGEST_SIZE,
  3665. },
  3666. .caam = {
  3667. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3668. OP_ALG_AAI_CTR_MOD128,
  3669. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3670. OP_ALG_AAI_HMAC_PRECOMP,
  3671. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3672. .rfc3686 = true,
  3673. },
  3674. },
  3675. {
  3676. .aead = {
  3677. .base = {
  3678. .cra_name = "seqiv(authenc("
  3679. "hmac(sha1),rfc3686(ctr(aes))))",
  3680. .cra_driver_name = "seqiv-authenc-hmac-sha1-"
  3681. "rfc3686-ctr-aes-caam",
  3682. .cra_blocksize = 1,
  3683. },
  3684. .setkey = aead_setkey,
  3685. .setauthsize = aead_setauthsize,
  3686. .encrypt = aead_encrypt,
  3687. .decrypt = aead_decrypt,
  3688. .ivsize = CTR_RFC3686_IV_SIZE,
  3689. .maxauthsize = SHA1_DIGEST_SIZE,
  3690. },
  3691. .caam = {
  3692. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3693. OP_ALG_AAI_CTR_MOD128,
  3694. .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
  3695. OP_ALG_AAI_HMAC_PRECOMP,
  3696. .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
  3697. .rfc3686 = true,
  3698. .geniv = true,
  3699. },
  3700. },
  3701. {
  3702. .aead = {
  3703. .base = {
  3704. .cra_name = "authenc(hmac(sha224),"
  3705. "rfc3686(ctr(aes)))",
  3706. .cra_driver_name = "authenc-hmac-sha224-"
  3707. "rfc3686-ctr-aes-caam",
  3708. .cra_blocksize = 1,
  3709. },
  3710. .setkey = aead_setkey,
  3711. .setauthsize = aead_setauthsize,
  3712. .encrypt = aead_encrypt,
  3713. .decrypt = aead_decrypt,
  3714. .ivsize = CTR_RFC3686_IV_SIZE,
  3715. .maxauthsize = SHA224_DIGEST_SIZE,
  3716. },
  3717. .caam = {
  3718. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3719. OP_ALG_AAI_CTR_MOD128,
  3720. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3721. OP_ALG_AAI_HMAC_PRECOMP,
  3722. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3723. .rfc3686 = true,
  3724. },
  3725. },
  3726. {
  3727. .aead = {
  3728. .base = {
  3729. .cra_name = "seqiv(authenc("
  3730. "hmac(sha224),rfc3686(ctr(aes))))",
  3731. .cra_driver_name = "seqiv-authenc-hmac-sha224-"
  3732. "rfc3686-ctr-aes-caam",
  3733. .cra_blocksize = 1,
  3734. },
  3735. .setkey = aead_setkey,
  3736. .setauthsize = aead_setauthsize,
  3737. .encrypt = aead_encrypt,
  3738. .decrypt = aead_decrypt,
  3739. .ivsize = CTR_RFC3686_IV_SIZE,
  3740. .maxauthsize = SHA224_DIGEST_SIZE,
  3741. },
  3742. .caam = {
  3743. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3744. OP_ALG_AAI_CTR_MOD128,
  3745. .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
  3746. OP_ALG_AAI_HMAC_PRECOMP,
  3747. .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
  3748. .rfc3686 = true,
  3749. .geniv = true,
  3750. },
  3751. },
  3752. {
  3753. .aead = {
  3754. .base = {
  3755. .cra_name = "authenc(hmac(sha256),"
  3756. "rfc3686(ctr(aes)))",
  3757. .cra_driver_name = "authenc-hmac-sha256-"
  3758. "rfc3686-ctr-aes-caam",
  3759. .cra_blocksize = 1,
  3760. },
  3761. .setkey = aead_setkey,
  3762. .setauthsize = aead_setauthsize,
  3763. .encrypt = aead_encrypt,
  3764. .decrypt = aead_decrypt,
  3765. .ivsize = CTR_RFC3686_IV_SIZE,
  3766. .maxauthsize = SHA256_DIGEST_SIZE,
  3767. },
  3768. .caam = {
  3769. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3770. OP_ALG_AAI_CTR_MOD128,
  3771. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3772. OP_ALG_AAI_HMAC_PRECOMP,
  3773. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3774. .rfc3686 = true,
  3775. },
  3776. },
  3777. {
  3778. .aead = {
  3779. .base = {
  3780. .cra_name = "seqiv(authenc(hmac(sha256),"
  3781. "rfc3686(ctr(aes))))",
  3782. .cra_driver_name = "seqiv-authenc-hmac-sha256-"
  3783. "rfc3686-ctr-aes-caam",
  3784. .cra_blocksize = 1,
  3785. },
  3786. .setkey = aead_setkey,
  3787. .setauthsize = aead_setauthsize,
  3788. .encrypt = aead_encrypt,
  3789. .decrypt = aead_decrypt,
  3790. .ivsize = CTR_RFC3686_IV_SIZE,
  3791. .maxauthsize = SHA256_DIGEST_SIZE,
  3792. },
  3793. .caam = {
  3794. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3795. OP_ALG_AAI_CTR_MOD128,
  3796. .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
  3797. OP_ALG_AAI_HMAC_PRECOMP,
  3798. .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
  3799. .rfc3686 = true,
  3800. .geniv = true,
  3801. },
  3802. },
  3803. {
  3804. .aead = {
  3805. .base = {
  3806. .cra_name = "authenc(hmac(sha384),"
  3807. "rfc3686(ctr(aes)))",
  3808. .cra_driver_name = "authenc-hmac-sha384-"
  3809. "rfc3686-ctr-aes-caam",
  3810. .cra_blocksize = 1,
  3811. },
  3812. .setkey = aead_setkey,
  3813. .setauthsize = aead_setauthsize,
  3814. .encrypt = aead_encrypt,
  3815. .decrypt = aead_decrypt,
  3816. .ivsize = CTR_RFC3686_IV_SIZE,
  3817. .maxauthsize = SHA384_DIGEST_SIZE,
  3818. },
  3819. .caam = {
  3820. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3821. OP_ALG_AAI_CTR_MOD128,
  3822. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3823. OP_ALG_AAI_HMAC_PRECOMP,
  3824. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3825. .rfc3686 = true,
  3826. },
  3827. },
  3828. {
  3829. .aead = {
  3830. .base = {
  3831. .cra_name = "seqiv(authenc(hmac(sha384),"
  3832. "rfc3686(ctr(aes))))",
  3833. .cra_driver_name = "seqiv-authenc-hmac-sha384-"
  3834. "rfc3686-ctr-aes-caam",
  3835. .cra_blocksize = 1,
  3836. },
  3837. .setkey = aead_setkey,
  3838. .setauthsize = aead_setauthsize,
  3839. .encrypt = aead_encrypt,
  3840. .decrypt = aead_decrypt,
  3841. .ivsize = CTR_RFC3686_IV_SIZE,
  3842. .maxauthsize = SHA384_DIGEST_SIZE,
  3843. },
  3844. .caam = {
  3845. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3846. OP_ALG_AAI_CTR_MOD128,
  3847. .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
  3848. OP_ALG_AAI_HMAC_PRECOMP,
  3849. .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
  3850. .rfc3686 = true,
  3851. .geniv = true,
  3852. },
  3853. },
  3854. {
  3855. .aead = {
  3856. .base = {
  3857. .cra_name = "authenc(hmac(sha512),"
  3858. "rfc3686(ctr(aes)))",
  3859. .cra_driver_name = "authenc-hmac-sha512-"
  3860. "rfc3686-ctr-aes-caam",
  3861. .cra_blocksize = 1,
  3862. },
  3863. .setkey = aead_setkey,
  3864. .setauthsize = aead_setauthsize,
  3865. .encrypt = aead_encrypt,
  3866. .decrypt = aead_decrypt,
  3867. .ivsize = CTR_RFC3686_IV_SIZE,
  3868. .maxauthsize = SHA512_DIGEST_SIZE,
  3869. },
  3870. .caam = {
  3871. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3872. OP_ALG_AAI_CTR_MOD128,
  3873. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3874. OP_ALG_AAI_HMAC_PRECOMP,
  3875. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3876. .rfc3686 = true,
  3877. },
  3878. },
  3879. {
  3880. .aead = {
  3881. .base = {
  3882. .cra_name = "seqiv(authenc(hmac(sha512),"
  3883. "rfc3686(ctr(aes))))",
  3884. .cra_driver_name = "seqiv-authenc-hmac-sha512-"
  3885. "rfc3686-ctr-aes-caam",
  3886. .cra_blocksize = 1,
  3887. },
  3888. .setkey = aead_setkey,
  3889. .setauthsize = aead_setauthsize,
  3890. .encrypt = aead_encrypt,
  3891. .decrypt = aead_decrypt,
  3892. .ivsize = CTR_RFC3686_IV_SIZE,
  3893. .maxauthsize = SHA512_DIGEST_SIZE,
  3894. },
  3895. .caam = {
  3896. .class1_alg_type = OP_ALG_ALGSEL_AES |
  3897. OP_ALG_AAI_CTR_MOD128,
  3898. .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
  3899. OP_ALG_AAI_HMAC_PRECOMP,
  3900. .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
  3901. .rfc3686 = true,
  3902. .geniv = true,
  3903. },
  3904. },
  3905. };
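/*
 * One registered (giv)ablkcipher instance: the crypto_alg itself plus the CAAM
 * operation types, kept on alg_list so it can be unregistered and freed at
 * module exit.
 */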
  3906. struct caam_crypto_alg {
  3907. struct crypto_alg crypto_alg;
  3908. struct list_head entry;
  3909. struct caam_alg_entry caam;
  3910. };
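/*
 * Per-transform initialization shared by the ablkcipher and AEAD paths:
 * allocate a job ring for this tfm and copy the CAAM operation types into the
 * context for later shared-descriptor construction.
 */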
  3911. static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
  3912. {
  3913. ctx->jrdev = caam_jr_alloc();
  3914. if (IS_ERR(ctx->jrdev)) {
  3915. pr_err("Job Ring Device allocation for transform failed\n");
  3916. return PTR_ERR(ctx->jrdev);
  3917. }
  3918. /* copy descriptor header template value */
  3919. ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
  3920. ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
  3921. ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
  3922. return 0;
  3923. }
  3924. static int caam_cra_init(struct crypto_tfm *tfm)
  3925. {
  3926. struct crypto_alg *alg = tfm->__crt_alg;
  3927. struct caam_crypto_alg *caam_alg =
  3928. container_of(alg, struct caam_crypto_alg, crypto_alg);
  3929. struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
  3930. return caam_init_common(ctx, &caam_alg->caam);
  3931. }
  3932. static int caam_aead_init(struct crypto_aead *tfm)
  3933. {
  3934. struct aead_alg *alg = crypto_aead_alg(tfm);
  3935. struct caam_aead_alg *caam_alg =
  3936. container_of(alg, struct caam_aead_alg, aead);
  3937. struct caam_ctx *ctx = crypto_aead_ctx(tfm);
  3938. return caam_init_common(ctx, &caam_alg->caam);
  3939. }
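/*
 * Per-transform teardown: unmap whichever shared descriptors and key were
 * DMA-mapped for this context, then release the job ring.
 */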
  3940. static void caam_exit_common(struct caam_ctx *ctx)
  3941. {
  3942. if (ctx->sh_desc_enc_dma &&
  3943. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
  3944. dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
  3945. desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
  3946. if (ctx->sh_desc_dec_dma &&
  3947. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
  3948. dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
  3949. desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
  3950. if (ctx->sh_desc_givenc_dma &&
  3951. !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
  3952. dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
  3953. desc_bytes(ctx->sh_desc_givenc),
  3954. DMA_TO_DEVICE);
  3955. if (ctx->key_dma &&
  3956. !dma_mapping_error(ctx->jrdev, ctx->key_dma))
  3957. dma_unmap_single(ctx->jrdev, ctx->key_dma,
  3958. ctx->enckeylen + ctx->split_key_pad_len,
  3959. DMA_TO_DEVICE);
  3960. caam_jr_free(ctx->jrdev);
  3961. }
  3962. static void caam_cra_exit(struct crypto_tfm *tfm)
  3963. {
  3964. caam_exit_common(crypto_tfm_ctx(tfm));
  3965. }
  3966. static void caam_aead_exit(struct crypto_aead *tfm)
  3967. {
  3968. caam_exit_common(crypto_aead_ctx(tfm));
  3969. }
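/*
 * Module exit: unregister every AEAD that was actually registered, then
 * unregister and free the ablkcipher algorithms collected on alg_list.
 */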
  3970. static void __exit caam_algapi_exit(void)
  3971. {
  3972. struct caam_crypto_alg *t_alg, *n;
  3973. int i;
  3974. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  3975. struct caam_aead_alg *t_alg = driver_aeads + i;
  3976. if (t_alg->registered)
  3977. crypto_unregister_aead(&t_alg->aead);
  3978. }
  3979. if (!alg_list.next)
  3980. return;
  3981. list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
  3982. crypto_unregister_alg(&t_alg->crypto_alg);
  3983. list_del(&t_alg->entry);
  3984. kfree(t_alg);
  3985. }
  3986. }
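/*
 * Build a crypto_alg from a driver_algs template: fill in the generic fields
 * (module, init/exit hooks, priority, block size, context size, flags) and
 * select the givcipher or ablkcipher type according to the template, then copy
 * the CAAM operation types alongside it.
 */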
  3987. static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
  3988. *template)
  3989. {
  3990. struct caam_crypto_alg *t_alg;
  3991. struct crypto_alg *alg;
  3992. t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
  3993. if (!t_alg) {
  3994. pr_err("failed to allocate t_alg\n");
  3995. return ERR_PTR(-ENOMEM);
  3996. }
  3997. alg = &t_alg->crypto_alg;
  3998. snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
  3999. snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
  4000. template->driver_name);
  4001. alg->cra_module = THIS_MODULE;
  4002. alg->cra_init = caam_cra_init;
  4003. alg->cra_exit = caam_cra_exit;
  4004. alg->cra_priority = CAAM_CRA_PRIORITY;
  4005. alg->cra_blocksize = template->blocksize;
  4006. alg->cra_alignmask = 0;
  4007. alg->cra_ctxsize = sizeof(struct caam_ctx);
  4008. alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
  4009. template->type;
  4010. switch (template->type) {
  4011. case CRYPTO_ALG_TYPE_GIVCIPHER:
  4012. alg->cra_type = &crypto_givcipher_type;
  4013. alg->cra_ablkcipher = template->template_ablkcipher;
  4014. break;
  4015. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  4016. alg->cra_type = &crypto_ablkcipher_type;
  4017. alg->cra_ablkcipher = template->template_ablkcipher;
  4018. break;
  4019. }
  4020. t_alg->caam.class1_alg_type = template->class1_alg_type;
  4021. t_alg->caam.class2_alg_type = template->class2_alg_type;
  4022. t_alg->caam.alg_op = template->alg_op;
  4023. return t_alg;
  4024. }
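/*
 * Fill in the aead_alg fields common to every driver_aeads entry: module,
 * priority, context size, flags and the init/exit hooks.
 */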
  4025. static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
  4026. {
  4027. struct aead_alg *alg = &t_alg->aead;
  4028. alg->base.cra_module = THIS_MODULE;
  4029. alg->base.cra_priority = CAAM_CRA_PRIORITY;
  4030. alg->base.cra_ctxsize = sizeof(struct caam_ctx);
  4031. alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
  4032. alg->init = caam_aead_init;
  4033. alg->exit = caam_aead_exit;
  4034. }
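/*
 * Module init: locate the CAAM controller node, read the CHA version and
 * instantiation registers to see which engines (DES, AES, MD) are present and
 * at what capability level, then register only the algorithms the hardware can
 * actually support.
 */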
  4035. static int __init caam_algapi_init(void)
  4036. {
  4037. struct device_node *dev_node;
  4038. struct platform_device *pdev;
  4039. struct device *ctrldev;
  4040. struct caam_drv_private *priv;
  4041. int i = 0, err = 0;
  4042. u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
  4043. unsigned int md_limit = SHA512_DIGEST_SIZE;
  4044. bool registered = false;
  4045. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
  4046. if (!dev_node) {
  4047. dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
  4048. if (!dev_node)
  4049. return -ENODEV;
  4050. }
  4051. pdev = of_find_device_by_node(dev_node);
  4052. if (!pdev) {
  4053. of_node_put(dev_node);
  4054. return -ENODEV;
  4055. }
  4056. ctrldev = &pdev->dev;
  4057. priv = dev_get_drvdata(ctrldev);
  4058. of_node_put(dev_node);
  4059. /*
  4060. * If priv is NULL, it's probably because the caam driver wasn't
  4061. * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
  4062. */
  4063. if (!priv)
  4064. return -ENODEV;
  4065. INIT_LIST_HEAD(&alg_list);
  4066. /*
  4067. * Register crypto algorithms the device supports.
  4068. * First, detect presence and attributes of DES, AES, and MD blocks.
  4069. */
  4070. cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
  4071. cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
  4072. des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
  4073. aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
  4074. md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
  4075. /* If MD is present, limit digest size based on LP256 */
  4076. if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
  4077. md_limit = SHA256_DIGEST_SIZE;
  4078. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  4079. struct caam_crypto_alg *t_alg;
  4080. struct caam_alg_template *alg = driver_algs + i;
  4081. u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
  4082. /* Skip DES algorithms if not supported by device */
  4083. if (!des_inst &&
  4084. ((alg_sel == OP_ALG_ALGSEL_3DES) ||
  4085. (alg_sel == OP_ALG_ALGSEL_DES)))
  4086. continue;
  4087. /* Skip AES algorithms if not supported by device */
  4088. if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
  4089. continue;
  4090. /*
  4091. * Check support for AES modes not available
  4092. * on LP devices.
  4093. */
  4094. if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
  4095. if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
  4096. OP_ALG_AAI_XTS)
  4097. continue;
  4098. t_alg = caam_alg_alloc(alg);
  4099. if (IS_ERR(t_alg)) {
  4100. err = PTR_ERR(t_alg);
  4101. pr_warn("%s alg allocation failed\n", alg->driver_name);
  4102. continue;
  4103. }
  4104. err = crypto_register_alg(&t_alg->crypto_alg);
  4105. if (err) {
  4106. pr_warn("%s alg registration failed\n",
  4107. t_alg->crypto_alg.cra_driver_name);
  4108. kfree(t_alg);
  4109. continue;
  4110. }
  4111. list_add_tail(&t_alg->entry, &alg_list);
  4112. registered = true;
  4113. }
  4114. for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
  4115. struct caam_aead_alg *t_alg = driver_aeads + i;
  4116. u32 c1_alg_sel = t_alg->caam.class1_alg_type &
  4117. OP_ALG_ALGSEL_MASK;
  4118. u32 c2_alg_sel = t_alg->caam.class2_alg_type &
  4119. OP_ALG_ALGSEL_MASK;
  4120. u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
  4121. /* Skip DES algorithms if not supported by device */
  4122. if (!des_inst &&
  4123. ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
  4124. (c1_alg_sel == OP_ALG_ALGSEL_DES)))
  4125. continue;
  4126. /* Skip AES algorithms if not supported by device */
  4127. if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
  4128. continue;
  4129. /*
  4130. * Check support for AES algorithms not available
  4131. * on LP devices.
  4132. */
  4133. if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
  4134. if (alg_aai == OP_ALG_AAI_GCM)
  4135. continue;
  4136. /*
  4137. * Skip algorithms requiring message digests
  4138. * if MD or MD size is not supported by device.
  4139. */
  4140. if (c2_alg_sel &&
  4141. (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
  4142. continue;
  4143. caam_aead_alg_init(t_alg);
  4144. err = crypto_register_aead(&t_alg->aead);
  4145. if (err) {
  4146. pr_warn("%s alg registration failed\n",
  4147. t_alg->aead.base.cra_driver_name);
  4148. continue;
  4149. }
  4150. t_alg->registered = true;
  4151. registered = true;
  4152. }
  4153. if (registered)
  4154. pr_info("caam algorithms registered in /proc/crypto\n");
  4155. return err;
  4156. }
  4157. module_init(caam_algapi_init);
  4158. module_exit(caam_algapi_exit);
  4159. MODULE_LICENSE("GPL");
  4160. MODULE_DESCRIPTION("FSL CAAM support for crypto API");
  4161. MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
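/*
 * Illustrative sketch (not part of this driver): once the algorithms above are
 * registered, a kernel user reaches them through the generic crypto API, e.g.
 * the "gcm(aes)" AEAD. Error handling is omitted and key, iv, src_sg, dst_sg,
 * assoclen, cryptlen and done_cb are placeholders, not names from this file:
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, done_cb, NULL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);	(typically returns -EINPROGRESS; done_cb
 *					 runs when the CAAM job completes)
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */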