qce.c

  1. /* Qualcomm Crypto Engine driver.
  2. *
  3. * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 and
  7. * only version 2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <linux/types.h>
  15. #include <linux/kernel.h>
  16. #include <linux/module.h>
  17. #include <linux/mod_devicetable.h>
  18. #include <linux/device.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/io.h>
  23. #include <linux/platform_device.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/delay.h>
  26. #include <linux/crypto.h>
  27. #include <crypto/hash.h>
  28. #include <crypto/sha.h>
  29. #include <linux/qcedev.h>
  30. #include <linux/qcota.h>
  31. #include <mach/dma.h>
  32. #include "qce.h"
  33. #include "qcryptohw_30.h"
  34. #include "qce_ota.h"
  35. /* ADM definitions */
  36. #define LI_SG_CMD (1 << 31) /* last index in the scatter gather cmd */
  37. #define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
  38. #define DST_INDEX_SG_CMD(index) (index & 0x3fff)
  39. #define ADM_DESC_LAST (1 << 31)
  40. /* Data xfer between DM and CE in blocks of 16 bytes */
  41. #define ADM_CE_BLOCK_SIZE 16
  42. #define QCE_FIFO_SIZE 0x8000
  43. /* Data xfer between DM and CE in blocks of 64 bytes */
  44. #define ADM_SHA_BLOCK_SIZE 64
  45. #define ADM_DESC_LENGTH_MASK 0xffff
  46. #define ADM_DESC_LENGTH(x) (x & ADM_DESC_LENGTH_MASK)
  47. struct dmov_desc {
  48. uint32_t addr;
  49. uint32_t len;
  50. };
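/*
 * Each dmov_desc entry holds a 32-bit bus address and a length word. Only the
 * low 16 bits of len (ADM_DESC_LENGTH_MASK) carry the byte count; bit 31
 * (ADM_DESC_LAST) flags the final descriptor of a list, e.g. the last
 * 0x40-byte entry of a chain is written as len = ADM_DESC_LAST | 0x40.
 */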
  51. #define ADM_STATUS_OK 0x80000002
  52. /* Misc definitions */
  53. /* QCE max number of descriptors in a descriptor list */
  54. #define QCE_MAX_NUM_DESC 128
  55. /* State of DM channel */
  56. enum qce_chan_st_enum {
  57. QCE_CHAN_STATE_IDLE = 0,
  58. QCE_CHAN_STATE_IN_PROG = 1,
  59. QCE_CHAN_STATE_COMP = 2,
  60. QCE_CHAN_STATE_LAST
  61. };
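/*
 * State tracking for the two data mover channels. A channel is IDLE when no
 * transfer is queued and is marked COMP by its ADM callback. For cipher and
 * AEAD requests the completion handler runs only after both ce_in and ce_out
 * have reached COMP, after which both are returned to IDLE; the hash path
 * uses ce_in only and completes from that callback alone.
 */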
  62. /*
  63. * CE HW device structure.
  64. * Each engine has an instance of the structure.
  65. * Each engine can only handle one crypto operation at a time. It is up to
  66. * the software above to ensure operations on an engine are single-threaded.
  67. */
  68. struct qce_device {
  69. struct device *pdev; /* Handle to platform_device structure */
  70. unsigned char *coh_vmem; /* Allocated coherent virtual memory */
  71. dma_addr_t coh_pmem; /* Allocated coherent physical memory */
  72. void __iomem *iobase; /* Virtual io base of CE HW */
  73. unsigned int phy_iobase; /* Physical io base of CE HW */
  74. struct clk *ce_clk; /* Handle to CE clk */
  75. unsigned int crci_in; /* CRCI for CE DM IN Channel */
  76. unsigned int crci_out; /* CRCI for CE DM OUT Channel */
  77. unsigned int crci_hash; /* CRCI for CE HASH */
  78. unsigned int chan_ce_in; /* ADM channel used for CE input
  79. * and auth result if authentication
  80. * only operation. */
  81. unsigned int chan_ce_out; /* ADM channel used for CE output,
  82. and icv for esp */
  83. unsigned int *cmd_pointer_list_ce_in;
  84. dma_addr_t phy_cmd_pointer_list_ce_in;
  85. unsigned int *cmd_pointer_list_ce_out;
  86. dma_addr_t phy_cmd_pointer_list_ce_out;
  87. unsigned char *cmd_list_ce_in;
  88. dma_addr_t phy_cmd_list_ce_in;
  89. unsigned char *cmd_list_ce_out;
  90. dma_addr_t phy_cmd_list_ce_out;
  91. struct dmov_desc *ce_out_src_desc;
  92. dma_addr_t phy_ce_out_src_desc;
  93. struct dmov_desc *ce_out_dst_desc;
  94. dma_addr_t phy_ce_out_dst_desc;
  95. struct dmov_desc *ce_in_src_desc;
  96. dma_addr_t phy_ce_in_src_desc;
  97. struct dmov_desc *ce_in_dst_desc;
  98. dma_addr_t phy_ce_in_dst_desc;
  99. unsigned char *ce_out_ignore;
  100. dma_addr_t phy_ce_out_ignore;
  101. unsigned char *ce_pad;
  102. dma_addr_t phy_ce_pad;
  103. struct msm_dmov_cmd *chan_ce_in_cmd;
  104. struct msm_dmov_cmd *chan_ce_out_cmd;
  105. uint32_t ce_out_ignore_size;
  106. int ce_out_dst_desc_index;
  107. int ce_in_dst_desc_index;
  108. int ce_out_src_desc_index;
  109. int ce_in_src_desc_index;
  110. enum qce_chan_st_enum chan_ce_in_state; /* chan ce_in state */
  111. enum qce_chan_st_enum chan_ce_out_state; /* chan ce_out state */
  112. int chan_ce_in_status; /* chan ce_in status */
  113. int chan_ce_out_status; /* chan ce_out status */
  114. unsigned char *dig_result;
  115. dma_addr_t phy_dig_result;
  116. /* cached aes key */
  117. uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)];
  118. uint32_t aes_key_size; /* cached aes key size in bytes */
  119. int fastaes; /* ce supports fast aes */
  120. int hmac; /* ce supports hmac-sha1 */
  121. bool ota; /* ce supports ota */
  122. qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
  123. int assoc_nents;
  124. int src_nents;
  125. int dst_nents;
  126. void *areq;
  127. enum qce_cipher_mode_enum mode;
  128. dma_addr_t phy_iv_in;
  129. dma_addr_t phy_ota_src;
  130. dma_addr_t phy_ota_dst;
  131. unsigned int ota_size;
  132. int err;
  133. };
  134. /* Standard initialization vector for SHA-1, source: FIPS 180-2 */
  135. static uint32_t _std_init_vector_sha1[] = {
  136. 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
  137. };
  138. /* Standard initialization vector for SHA-256, source: FIPS 180-2 */
  139. static uint32_t _std_init_vector_sha256[] = {
  140. 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
  141. 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
  142. };
  143. /* Source: FIPS 197, Figure 7. S-box: substitution values for the byte xy */
  144. static const uint32_t _s_box[256] = {
  145. 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
  146. 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
  147. 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
  148. 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
  149. 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
  150. 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
  151. 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
  152. 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
  153. 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
  154. 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
  155. 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
  156. 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
  157. 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
  158. 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
  159. 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
  160. 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
  161. 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
  162. 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
  163. 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
  164. 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
  165. 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
  166. 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
  167. 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
  168. 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
  169. 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
  170. 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
  171. 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
  172. 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
  173. 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
  174. 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
  175. 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
  176. 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };
  177. /*
  178. * Source: FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key
  179. * Expansion.
  180. */
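/*
 * Software AES key schedule, used by _ce_setup() when the engine lacks the
 * fast AES block (pce_dev->fastaes == 0). In that case the expanded round
 * keys are written individually to the CRYPTO_AES_RNDKEY registers; with
 * fast AES the raw key words are written there instead.
 */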
  181. static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY,
  182. uint32_t *AES_RND_KEY)
  183. {
  184. uint32_t i;
  185. uint32_t Nk;
  186. uint32_t Nr, rot_data;
  187. uint32_t Rcon = 0x01000000;
  188. uint32_t temp;
  189. uint32_t data_in;
  190. uint32_t MSB_store;
  191. uint32_t byte_for_sub;
  192. uint32_t word_sub[4];
  193. switch (keysize) {
  194. case 192:
  195. Nk = 6;
  196. Nr = 12;
  197. break;
  198. case 256:
  199. Nk = 8;
  200. Nr = 14;
  201. break;
  202. case 128:
  203. default: /* default to AES128 */
  204. Nk = 4;
  205. Nr = 10;
  206. break;
  207. }
  208. /* key expansion */
  209. i = 0;
  210. while (i < Nk) {
  211. AES_RND_KEY[i] = AES_KEY[i];
  212. i = i + 1;
  213. }
  214. i = Nk;
  215. while (i < (4 * (Nr + 1))) {
  216. temp = AES_RND_KEY[i-1];
  217. if (Nr == 14) {
  218. switch (i) {
  219. case 8:
  220. Rcon = 0x01000000;
  221. break;
  222. case 16:
  223. Rcon = 0x02000000;
  224. break;
  225. case 24:
  226. Rcon = 0x04000000;
  227. break;
  228. case 32:
  229. Rcon = 0x08000000;
  230. break;
  231. case 40:
  232. Rcon = 0x10000000;
  233. break;
  234. case 48:
  235. Rcon = 0x20000000;
  236. break;
  237. case 56:
  238. Rcon = 0x40000000;
  239. break;
  240. }
  241. } else if (Nr == 12) {
  242. switch (i) {
  243. case 6:
  244. Rcon = 0x01000000;
  245. break;
  246. case 12:
  247. Rcon = 0x02000000;
  248. break;
  249. case 18:
  250. Rcon = 0x04000000;
  251. break;
  252. case 24:
  253. Rcon = 0x08000000;
  254. break;
  255. case 30:
  256. Rcon = 0x10000000;
  257. break;
  258. case 36:
  259. Rcon = 0x20000000;
  260. break;
  261. case 42:
  262. Rcon = 0x40000000;
  263. break;
  264. case 48:
  265. Rcon = 0x80000000;
  266. break;
  267. }
  268. } else if (Nr == 10) {
  269. switch (i) {
  270. case 4:
  271. Rcon = 0x01000000;
  272. break;
  273. case 8:
  274. Rcon = 0x02000000;
  275. break;
  276. case 12:
  277. Rcon = 0x04000000;
  278. break;
  279. case 16:
  280. Rcon = 0x08000000;
  281. break;
  282. case 20:
  283. Rcon = 0x10000000;
  284. break;
  285. case 24:
  286. Rcon = 0x20000000;
  287. break;
  288. case 28:
  289. Rcon = 0x40000000;
  290. break;
  291. case 32:
  292. Rcon = 0x80000000;
  293. break;
  294. case 36:
  295. Rcon = 0x1b000000;
  296. break;
  297. case 40:
  298. Rcon = 0x36000000;
  299. break;
  300. }
  301. }
  302. if ((i % Nk) == 0) {
  303. data_in = temp;
  304. MSB_store = (data_in >> 24 & 0xff);
  305. rot_data = (data_in << 8) | MSB_store;
  306. byte_for_sub = rot_data;
  307. word_sub[0] = _s_box[(byte_for_sub & 0xff)];
  308. word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
  309. << 8);
  310. word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
  311. << 16);
  312. word_sub[3] = (_s_box[((byte_for_sub & 0xff000000)
  313. >> 24)] << 24);
  314. word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
  315. word_sub[3];
  316. temp = word_sub[0] ^ Rcon;
  317. } else if ((Nk > 6) && ((i % Nk) == 4)) {
  318. byte_for_sub = temp;
  319. word_sub[0] = _s_box[(byte_for_sub & 0xff)];
  320. word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)]
  321. << 8);
  322. word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)]
  323. << 16);
  324. word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >>
  325. 24)] << 24);
  326. word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] |
  327. word_sub[3];
  328. temp = word_sub[0];
  329. }
  330. AES_RND_KEY[i] = AES_RND_KEY[i-Nk]^temp;
  331. i = i+1;
  332. }
  333. }
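/*
 * Helpers converting between byte streams and big-endian ("network order")
 * 32-bit words for the IV, key and digest values, e.g. the bytes
 * {0x01, 0x02, 0x03, 0x04} map to the word 0x01020304. In the byte-to-word
 * direction a trailing partial word is left justified with its low bytes
 * zeroed.
 */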
  334. static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
  335. unsigned int len)
  336. {
  337. unsigned n;
  338. n = len / sizeof(uint32_t);
  339. for (; n > 0; n--) {
  340. *iv = ((*b << 24) & 0xff000000) |
  341. (((*(b+1)) << 16) & 0xff0000) |
  342. (((*(b+2)) << 8) & 0xff00) |
  343. (*(b+3) & 0xff);
  344. b += sizeof(uint32_t);
  345. iv++;
  346. }
  347. n = len % sizeof(uint32_t);
  348. if (n == 3) {
  349. *iv = ((*b << 24) & 0xff000000) |
  350. (((*(b+1)) << 16) & 0xff0000) |
  351. (((*(b+2)) << 8) & 0xff00);
  352. } else if (n == 2) {
  353. *iv = ((*b << 24) & 0xff000000) |
  354. (((*(b+1)) << 16) & 0xff0000);
  355. } else if (n == 1) {
  356. *iv = ((*b << 24) & 0xff000000);
  357. }
  358. }
  359. static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b,
  360. unsigned int len)
  361. {
  362. unsigned n = len / sizeof(uint32_t);
  363. for (; n > 0; n--) {
  364. *b++ = (unsigned char) ((*iv >> 24) & 0xff);
  365. *b++ = (unsigned char) ((*iv >> 16) & 0xff);
  366. *b++ = (unsigned char) ((*iv >> 8) & 0xff);
  367. *b++ = (unsigned char) (*iv & 0xff);
  368. iv++;
  369. }
  370. n = len % sizeof(uint32_t);
  371. if (n == 3) {
  372. *b++ = (unsigned char) ((*iv >> 24) & 0xff);
  373. *b++ = (unsigned char) ((*iv >> 16) & 0xff);
  374. *b = (unsigned char) ((*iv >> 8) & 0xff);
  375. } else if (n == 2) {
  376. *b++ = (unsigned char) ((*iv >> 24) & 0xff);
  377. *b = (unsigned char) ((*iv >> 16) & 0xff);
  378. } else if (n == 1) {
  379. *b = (unsigned char) ((*iv >> 24) & 0xff);
  380. }
  381. }
  382. static int count_sg(struct scatterlist *sg, int nbytes)
  383. {
  384. int i;
  385. for (i = 0; nbytes > 0; i++, sg = scatterwalk_sg_next(sg))
  386. nbytes -= sg->length;
  387. return i;
  388. }
  389. static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
  390. enum dma_data_direction direction)
  391. {
  392. int i;
  393. for (i = 0; i < nents; ++i) {
  394. dma_map_sg(dev, sg, 1, direction);
  395. sg = scatterwalk_sg_next(sg);
  396. }
  397. return nents;
  398. }
  399. static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
  400. int nents, enum dma_data_direction direction)
  401. {
  402. int i;
  403. for (i = 0; i < nents; ++i) {
  404. dma_unmap_sg(dev, sg, 1, direction);
  405. sg = scatterwalk_sg_next(sg);
  406. }
  407. return nents;
  408. }
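/*
 * _probe_ce_engine() identifies the crypto core: it checks CRYPTO_STATUS_REG
 * for the expected signature, decodes the core revision (Crypto 2, 3 or 3e),
 * and reads CRYPTO_ENGINES_AVAIL to record which optional blocks are present
 * (fast AES, HMAC-SHA1, and the F8/F9 OTA engines).
 */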
  409. static int _probe_ce_engine(struct qce_device *pce_dev)
  410. {
  411. unsigned int val;
  412. unsigned int rev;
  413. unsigned int eng_availability; /* engine available functions */
  414. val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
  415. if ((val & 0xfffffff) != 0x0200004) {
  416. dev_err(pce_dev->pdev,
  417. "unknown Qualcomm crypto device at 0x%x 0x%x\n",
  418. pce_dev->phy_iobase, val);
  419. return -EIO;
  420. };
  421. rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV;
  422. if (rev == 0x2) {
  423. dev_info(pce_dev->pdev,
  424. "Qualcomm Crypto 3e device found at 0x%x\n",
  425. pce_dev->phy_iobase);
  426. } else if (rev == 0x1) {
  427. dev_info(pce_dev->pdev,
  428. "Qualcomm Crypto 3 device found at 0x%x\n",
  429. pce_dev->phy_iobase);
  430. } else if (rev == 0x0) {
  431. dev_info(pce_dev->pdev,
  432. "Qualcomm Crypto 2 device found at 0x%x\n",
  433. pce_dev->phy_iobase);
  434. } else {
  435. dev_err(pce_dev->pdev,
  436. "unknown Qualcomm crypto device at 0x%x\n",
  437. pce_dev->phy_iobase);
  438. return -EIO;
  439. }
  440. eng_availability = readl_relaxed(pce_dev->iobase +
  441. CRYPTO_ENGINES_AVAIL);
  442. if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL)
  443. == CRYPTO_AES_SEL_FAST)
  444. pce_dev->fastaes = 1;
  445. else
  446. pce_dev->fastaes = 0;
  447. if (eng_availability & (1 << CRYPTO_HMAC_SEL))
  448. pce_dev->hmac = 1;
  449. else
  450. pce_dev->hmac = 0;
  451. if ((eng_availability & (1 << CRYPTO_F9_SEL)) &&
  452. (eng_availability & (1 << CRYPTO_F8_SEL)))
  453. pce_dev->ota = true;
  454. else
  455. pce_dev->ota = false;
  456. pce_dev->aes_key_size = 0;
  457. return 0;
  458. };
  459. static int _init_ce_engine(struct qce_device *pce_dev)
  460. {
  461. unsigned int val;
  462. /* reset qce */
  463. writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG);
  464. /* Ensure previous instruction (write to reset bit)
  465. * was completed.
  466. */
  467. mb();
  468. /* configure ce */
  469. val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) |
  470. (1 << CRYPTO_MASK_AUTH_DONE_INTR) |
  471. (1 << CRYPTO_MASK_ERR_INTR);
  472. writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG);
  473. if (_probe_ce_engine(pce_dev) < 0)
  474. return -EIO;
  475. if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) {
  476. dev_err(pce_dev->pdev,
  477. "unknown Qualcomm crypto device at 0x%x\n",
  478. pce_dev->phy_iobase);
  479. return -EIO;
  480. };
  481. return 0;
  482. };
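/*
 * Per-request SHA setup: load the AUTH_IV registers with either the standard
 * initial vector (first block) or the running digest from sreq->digest,
 * restore the byte counters, program the segment configuration and size, and
 * kick the engine through CRYPTO_GOPROC_REG. The data itself follows via the
 * data mover.
 */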
  483. static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq)
  484. {
  485. uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
  486. uint32_t diglen;
  487. int rc;
  488. int i;
  489. uint32_t cfg = 0;
  490. /* if not the last, the size has to be on the block boundary */
  491. if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
  492. return -EIO;
  493. switch (sreq->alg) {
  494. case QCE_HASH_SHA1:
  495. diglen = SHA1_DIGEST_SIZE;
  496. break;
  497. case QCE_HASH_SHA256:
  498. diglen = SHA256_DIGEST_SIZE;
  499. break;
  500. default:
  501. return -EINVAL;
  502. }
  503. /*
  504. * write 20/32 bytes, 5/8 words into auth_iv
  505. * for SHA1/SHA256
  506. */
  507. if (sreq->first_blk) {
  508. if (sreq->alg == QCE_HASH_SHA1) {
  509. for (i = 0; i < 5; i++)
  510. auth32[i] = _std_init_vector_sha1[i];
  511. } else {
  512. for (i = 0; i < 8; i++)
  513. auth32[i] = _std_init_vector_sha256[i];
  514. }
  515. } else
  516. _byte_stream_to_net_words(auth32, sreq->digest, diglen);
  517. rc = clk_enable(pce_dev->ce_clk);
  518. if (rc)
  519. return rc;
  520. writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
  521. writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
  522. writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
  523. writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
  524. writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
  525. if (sreq->alg == QCE_HASH_SHA256) {
  526. writel_relaxed(auth32[5], pce_dev->iobase +
  527. CRYPTO_AUTH_IV5_REG);
  528. writel_relaxed(auth32[6], pce_dev->iobase +
  529. CRYPTO_AUTH_IV6_REG);
  530. writel_relaxed(auth32[7], pce_dev->iobase +
  531. CRYPTO_AUTH_IV7_REG);
  532. }
  533. /* write auth_bytecnt 0/1, start with 0 */
  534. writel_relaxed(sreq->auth_data[0], pce_dev->iobase +
  535. CRYPTO_AUTH_BYTECNT0_REG);
  536. writel_relaxed(sreq->auth_data[1], pce_dev->iobase +
  537. CRYPTO_AUTH_BYTECNT1_REG);
  538. /* write auth_seg_cfg */
  539. writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE,
  540. pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  541. /*
  542. * write seg_cfg
  543. */
  544. if (sreq->alg == QCE_HASH_SHA1)
  545. cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE);
  546. else
  547. cfg = (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE);
  548. if (sreq->first_blk)
  549. cfg |= 1 << CRYPTO_FIRST;
  550. if (sreq->last_blk)
  551. cfg |= 1 << CRYPTO_LAST;
  552. cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG;
  553. writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
  554. /* write seg_size */
  555. writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  556. /* issue go to crypto */
  557. writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
  558. /* Ensure the previous instruction (setting the GO register)
  559. * was completed before issuing a DMA transfer request
  560. */
  561. mb();
  562. return 0;
  563. }
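/*
 * Per-request cipher setup (plus HMAC-SHA1 setup for AEAD requests): the SHA1
 * initial vector and HMAC key are loaded first for AEAD, then the cipher IV
 * and key are programmed for DES/3DES/AES. AES keys are cached in the
 * qce_device so an unchanged key is not rewritten, and a null (all-zero)
 * AES-256 key selects the hardware-backed key (CRYPTO_USE_HW_KEY).
 */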
  564. static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req,
  565. uint32_t totallen, uint32_t coffset)
  566. {
  567. uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = {
  568. 0, 0, 0, 0, 0};
  569. uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = {
  570. 0, 0, 0, 0, 0, 0, 0, 0};
  571. uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
  572. 0, 0, 0, 0};
  573. uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t);
  574. int aes_key_chg;
  575. int i, rc;
  576. uint32_t aes_round_key[CRYPTO_AES_RNDKEYS];
  577. uint32_t cfg;
  578. uint32_t ivsize = q_req->ivsize;
  579. rc = clk_enable(pce_dev->ce_clk);
  580. if (rc)
  581. return rc;
  582. cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST);
  583. if (q_req->op == QCE_REQ_AEAD) {
  584. /* do authentication setup */
  585. cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)|
  586. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG);
  587. /* write sha1 init vector */
  588. writel_relaxed(_std_init_vector_sha1[0],
  589. pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
  590. writel_relaxed(_std_init_vector_sha1[1],
  591. pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
  592. writel_relaxed(_std_init_vector_sha1[2],
  593. pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
  594. writel_relaxed(_std_init_vector_sha1[3],
  595. pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
  596. writel_relaxed(_std_init_vector_sha1[4],
  597. pce_dev->iobase + CRYPTO_AUTH_IV4_REG);
  598. /* write hmac key */
  599. _byte_stream_to_net_words(hmackey, q_req->authkey,
  600. q_req->authklen);
  601. writel_relaxed(hmackey[0], pce_dev->iobase +
  602. CRYPTO_AUTH_IV5_REG);
  603. writel_relaxed(hmackey[1], pce_dev->iobase +
  604. CRYPTO_AUTH_IV6_REG);
  605. writel_relaxed(hmackey[2], pce_dev->iobase +
  606. CRYPTO_AUTH_IV7_REG);
  607. writel_relaxed(hmackey[3], pce_dev->iobase +
  608. CRYPTO_AUTH_IV8_REG);
  609. writel_relaxed(hmackey[4], pce_dev->iobase +
  610. CRYPTO_AUTH_IV9_REG);
  611. writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
  612. writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
  613. /* write auth_seg_cfg */
  614. writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000,
  615. pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  616. }
  617. _byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen);
  618. switch (q_req->mode) {
  619. case QCE_MODE_ECB:
  620. cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  621. break;
  622. case QCE_MODE_CBC:
  623. cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  624. break;
  625. case QCE_MODE_CTR:
  626. default:
  627. cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
  628. break;
  629. }
  630. pce_dev->mode = q_req->mode;
  631. switch (q_req->alg) {
  632. case CIPHER_ALG_DES:
  633. if (q_req->mode != QCE_MODE_ECB) {
  634. _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
  635. writel_relaxed(enciv32[0], pce_dev->iobase +
  636. CRYPTO_CNTR0_IV0_REG);
  637. writel_relaxed(enciv32[1], pce_dev->iobase +
  638. CRYPTO_CNTR1_IV1_REG);
  639. }
  640. writel_relaxed(enckey32[0], pce_dev->iobase +
  641. CRYPTO_DES_KEY0_REG);
  642. writel_relaxed(enckey32[1], pce_dev->iobase +
  643. CRYPTO_DES_KEY1_REG);
  644. cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
  645. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
  646. break;
  647. case CIPHER_ALG_3DES:
  648. if (q_req->mode != QCE_MODE_ECB) {
  649. _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
  650. writel_relaxed(enciv32[0], pce_dev->iobase +
  651. CRYPTO_CNTR0_IV0_REG);
  652. writel_relaxed(enciv32[1], pce_dev->iobase +
  653. CRYPTO_CNTR1_IV1_REG);
  654. }
  655. writel_relaxed(enckey32[0], pce_dev->iobase +
  656. CRYPTO_DES_KEY0_REG);
  657. writel_relaxed(enckey32[1], pce_dev->iobase +
  658. CRYPTO_DES_KEY1_REG);
  659. writel_relaxed(enckey32[2], pce_dev->iobase +
  660. CRYPTO_DES_KEY2_REG);
  661. writel_relaxed(enckey32[3], pce_dev->iobase +
  662. CRYPTO_DES_KEY3_REG);
  663. writel_relaxed(enckey32[4], pce_dev->iobase +
  664. CRYPTO_DES_KEY4_REG);
  665. writel_relaxed(enckey32[5], pce_dev->iobase +
  666. CRYPTO_DES_KEY5_REG);
  667. cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
  668. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG));
  669. break;
  670. case CIPHER_ALG_AES:
  671. default:
  672. if (q_req->mode != QCE_MODE_ECB) {
  673. _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
  674. writel_relaxed(enciv32[0], pce_dev->iobase +
  675. CRYPTO_CNTR0_IV0_REG);
  676. writel_relaxed(enciv32[1], pce_dev->iobase +
  677. CRYPTO_CNTR1_IV1_REG);
  678. writel_relaxed(enciv32[2], pce_dev->iobase +
  679. CRYPTO_CNTR2_IV2_REG);
  680. writel_relaxed(enciv32[3], pce_dev->iobase +
  681. CRYPTO_CNTR3_IV3_REG);
  682. }
  683. /* set number of counter bits */
  684. writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
  685. if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
  686. cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
  687. CRYPTO_ENCR_KEY_SZ);
  688. cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
  689. } else {
  690. switch (q_req->encklen) {
  691. case AES128_KEY_SIZE:
  692. cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
  693. CRYPTO_ENCR_KEY_SZ);
  694. break;
  695. case AES192_KEY_SIZE:
  696. cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 <<
  697. CRYPTO_ENCR_KEY_SZ);
  698. break;
  699. case AES256_KEY_SIZE:
  700. default:
  701. cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 <<
  702. CRYPTO_ENCR_KEY_SZ);
  703. /* check for null key. If null, use hw key*/
  704. for (i = 0; i < enck_size_in_word; i++) {
  705. if (enckey32[i] != 0)
  706. break;
  707. }
  708. if (i == enck_size_in_word)
  709. cfg |= 1 << CRYPTO_USE_HW_KEY;
  710. break;
  711. } /* end of switch (q_req->encklen) */
  712. cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
  713. if (pce_dev->aes_key_size != q_req->encklen)
  714. aes_key_chg = 1;
  715. else {
  716. for (i = 0; i < enck_size_in_word; i++) {
  717. if (enckey32[i] != pce_dev->aeskey[i])
  718. break;
  719. }
  720. aes_key_chg = (i == enck_size_in_word) ? 0 : 1;
  721. }
  722. if (aes_key_chg) {
  723. if (pce_dev->fastaes) {
  724. for (i = 0; i < enck_size_in_word;
  725. i++) {
  726. writel_relaxed(enckey32[i],
  727. pce_dev->iobase +
  728. CRYPTO_AES_RNDKEY0 +
  729. (i * sizeof(uint32_t)));
  730. }
  731. } else {
  732. /* size in bit */
  733. _aes_expand_key_schedule(
  734. q_req->encklen * 8,
  735. enckey32, aes_round_key);
  736. for (i = 0; i < CRYPTO_AES_RNDKEYS;
  737. i++) {
  738. writel_relaxed(aes_round_key[i],
  739. pce_dev->iobase +
  740. CRYPTO_AES_RNDKEY0 +
  741. (i * sizeof(uint32_t)));
  742. }
  743. }
  744. pce_dev->aes_key_size = q_req->encklen;
  745. for (i = 0; i < enck_size_in_word; i++)
  746. pce_dev->aeskey[i] = enckey32[i];
  747. } /*if (aes_key_chg) { */
  748. } /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
  749. break;
  750. } /* end of switch (q_req->mode) */
  751. if (q_req->dir == QCE_ENCRYPT)
  752. cfg |= (1 << CRYPTO_AUTH_POS);
  753. cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
  754. /* write encr seg cfg */
  755. writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) |
  756. (coffset & 0xffff), /* cipher offset */
  757. pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  758. /* write seg cfg and size */
  759. writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);
  760. writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  761. /* issue go to crypto */
  762. writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
  763. /* Ensure the previous instruction (setting the GO register)
  764. * was completed before issuing a DMA transfer request
  765. */
  766. mb();
  767. return 0;
  768. };
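/*
 * AEAD completion: unmap the source, destination, associated-data and IV DMA
 * mappings, check CRYPTO_STATUS_REG for CRYPTO_SW_ERR (re-initializing the
 * engine on error), and for non-ECB modes read back the counter/IV registers
 * so the updated IV is passed to the client callback along with the digest.
 */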
  769. static int _aead_complete(struct qce_device *pce_dev)
  770. {
  771. struct aead_request *areq;
  772. struct crypto_aead *aead;
  773. uint32_t ivsize;
  774. uint32_t iv_out[4];
  775. unsigned char iv[4 * sizeof(uint32_t)];
  776. uint32_t status;
  777. areq = (struct aead_request *) pce_dev->areq;
  778. aead = crypto_aead_reqtfm(areq);
  779. ivsize = crypto_aead_ivsize(aead);
  780. if (areq->src != areq->dst) {
  781. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
  782. DMA_FROM_DEVICE);
  783. }
  784. qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
  785. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  786. DMA_TO_DEVICE);
  787. dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
  788. ivsize, DMA_TO_DEVICE);
  789. qce_dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
  790. DMA_TO_DEVICE);
  791. /* check ce error status */
  792. status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
  793. if (status & (1 << CRYPTO_SW_ERR)) {
  794. pce_dev->err++;
  795. dev_err(pce_dev->pdev,
  796. "Qualcomm Crypto Error at 0x%x, status%x\n",
  797. pce_dev->phy_iobase, status);
  798. _init_ce_engine(pce_dev);
  799. clk_disable(pce_dev->ce_clk);
  800. pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
  801. return 0;
  802. };
  803. /* get iv out */
  804. if (pce_dev->mode == QCE_MODE_ECB) {
  805. clk_disable(pce_dev->ce_clk);
  806. pce_dev->qce_cb(areq, pce_dev->dig_result, NULL,
  807. pce_dev->chan_ce_in_status |
  808. pce_dev->chan_ce_out_status);
  809. } else {
  810. iv_out[0] = readl_relaxed(pce_dev->iobase +
  811. CRYPTO_CNTR0_IV0_REG);
  812. iv_out[1] = readl_relaxed(pce_dev->iobase +
  813. CRYPTO_CNTR1_IV1_REG);
  814. iv_out[2] = readl_relaxed(pce_dev->iobase +
  815. CRYPTO_CNTR2_IV2_REG);
  816. iv_out[3] = readl_relaxed(pce_dev->iobase +
  817. CRYPTO_CNTR3_IV3_REG);
  818. _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
  819. clk_disable(pce_dev->ce_clk);
  820. pce_dev->qce_cb(areq, pce_dev->dig_result, iv,
  821. pce_dev->chan_ce_in_status |
  822. pce_dev->chan_ce_out_status);
  823. };
  824. return 0;
  825. };
  826. static void _sha_complete(struct qce_device *pce_dev)
  827. {
  828. struct ahash_request *areq;
  829. uint32_t auth_data[2];
  830. uint32_t status;
  831. areq = (struct ahash_request *) pce_dev->areq;
  832. qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
  833. DMA_TO_DEVICE);
  834. /* check ce error status */
  835. status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
  836. if (status & (1 << CRYPTO_SW_ERR)) {
  837. pce_dev->err++;
  838. dev_err(pce_dev->pdev,
  839. "Qualcomm Crypto Error at 0x%x, status%x\n",
  840. pce_dev->phy_iobase, status);
  841. _init_ce_engine(pce_dev);
  842. clk_disable(pce_dev->ce_clk);
  843. pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO);
  844. return;
  845. };
  846. auth_data[0] = readl_relaxed(pce_dev->iobase +
  847. CRYPTO_AUTH_BYTECNT0_REG);
  848. auth_data[1] = readl_relaxed(pce_dev->iobase +
  849. CRYPTO_AUTH_BYTECNT1_REG);
  850. /* Ensure previous instruction (retrieving byte count information)
  851. * was completed before disabling the clk.
  852. */
  853. mb();
  854. clk_disable(pce_dev->ce_clk);
  855. pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data,
  856. pce_dev->chan_ce_in_status);
  857. };
  858. static int _ablk_cipher_complete(struct qce_device *pce_dev)
  859. {
  860. struct ablkcipher_request *areq;
  861. uint32_t iv_out[4];
  862. unsigned char iv[4 * sizeof(uint32_t)];
  863. uint32_t status;
  864. areq = (struct ablkcipher_request *) pce_dev->areq;
  865. if (areq->src != areq->dst) {
  866. qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
  867. pce_dev->dst_nents, DMA_FROM_DEVICE);
  868. }
  869. qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
  870. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  871. DMA_TO_DEVICE);
  872. /* check ce error status */
  873. status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
  874. if (status & (1 << CRYPTO_SW_ERR)) {
  875. pce_dev->err++;
  876. dev_err(pce_dev->pdev,
  877. "Qualcomm Crypto Error at 0x%x, status%x\n",
  878. pce_dev->phy_iobase, status);
  879. _init_ce_engine(pce_dev);
  880. clk_disable(pce_dev->ce_clk);
  881. pce_dev->qce_cb(areq, NULL, NULL, -ENXIO);
  882. return 0;
  883. };
  884. /* get iv out */
  885. if (pce_dev->mode == QCE_MODE_ECB) {
  886. clk_disable(pce_dev->ce_clk);
  887. pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status |
  888. pce_dev->chan_ce_out_status);
  889. } else {
  890. iv_out[0] = readl_relaxed(pce_dev->iobase +
  891. CRYPTO_CNTR0_IV0_REG);
  892. iv_out[1] = readl_relaxed(pce_dev->iobase +
  893. CRYPTO_CNTR1_IV1_REG);
  894. iv_out[2] = readl_relaxed(pce_dev->iobase +
  895. CRYPTO_CNTR2_IV2_REG);
  896. iv_out[3] = readl_relaxed(pce_dev->iobase +
  897. CRYPTO_CNTR3_IV3_REG);
  898. _net_words_to_byte_stream(iv_out, iv, sizeof(iv));
  899. clk_disable(pce_dev->ce_clk);
  900. pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status |
  901. pce_dev->chan_ce_out_status);
  902. }
  903. return 0;
  904. };
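/*
 * Descriptor lengths are capped at QCE_FIFO_SIZE (0x8000 bytes), so an
 * oversized entry is split into a run of full-size descriptors plus a
 * remainder: a 0x14000-byte buffer, for instance, becomes three descriptors
 * of 0x8000, 0x8000 and 0x4000 bytes. Passing paddr == 0 leaves the addresses
 * already pre-filled in the descriptors untouched.
 */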
  905. static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
  906. unsigned int plen, unsigned int paddr, int *index)
  907. {
  908. while (plen > QCE_FIFO_SIZE) {
  909. pdesc->len = QCE_FIFO_SIZE;
  910. if (paddr > 0) {
  911. pdesc->addr = paddr;
  912. paddr += QCE_FIFO_SIZE;
  913. }
  914. plen -= pdesc->len;
  915. if (plen > 0) {
  916. *index = (*index) + 1;
  917. if ((*index) >= QCE_MAX_NUM_DESC)
  918. return -ENOMEM;
  919. pdesc++;
  920. }
  921. }
  922. if ((plen > 0) && (plen <= QCE_FIFO_SIZE)) {
  923. pdesc->len = plen;
  924. if (paddr > 0)
  925. pdesc->addr = paddr;
  926. }
  927. return 0;
  928. }
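/*
 * Build the ce_in source descriptor list from a scatterlist. An entry that is
 * physically contiguous with the previous one is merged into the same
 * descriptor instead of consuming a new slot; anything growing beyond
 * QCE_FIFO_SIZE is split again via qce_split_and_insert_dm_desc(). At most
 * QCE_MAX_NUM_DESC (128) descriptors are available.
 */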
  929. static int _chain_sg_buffer_in(struct qce_device *pce_dev,
  930. struct scatterlist *sg, unsigned int nbytes)
  931. {
  932. unsigned int len;
  933. unsigned int dlen;
  934. struct dmov_desc *pdesc;
  935. pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
  936. /*
  937. * Two consecutive chunks may be handled by the same
  938. * buffer descriptor.
  939. */
  940. while (nbytes > 0) {
  941. len = min(nbytes, sg_dma_len(sg));
  942. dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
  943. nbytes -= len;
  944. if (dlen == 0) {
  945. pdesc->addr = sg_dma_address(sg);
  946. pdesc->len = len;
  947. if (pdesc->len > QCE_FIFO_SIZE) {
  948. if (qce_split_and_insert_dm_desc(pdesc,
  949. pdesc->len, sg_dma_address(sg),
  950. &pce_dev->ce_in_src_desc_index))
  951. return -EIO;
  952. }
  953. } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
  954. pdesc->len = dlen + len;
  955. if (pdesc->len > QCE_FIFO_SIZE) {
  956. if (qce_split_and_insert_dm_desc(pdesc,
  957. pdesc->len, pdesc->addr,
  958. &pce_dev->ce_in_src_desc_index))
  959. return -EIO;
  960. }
  961. } else {
  962. pce_dev->ce_in_src_desc_index++;
  963. if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
  964. return -ENOMEM;
  965. pdesc++;
  966. pdesc->len = len;
  967. pdesc->addr = sg_dma_address(sg);
  968. if (pdesc->len > QCE_FIFO_SIZE) {
  969. if (qce_split_and_insert_dm_desc(pdesc,
  970. pdesc->len, sg_dma_address(sg),
  971. &pce_dev->ce_in_src_desc_index))
  972. return -EIO;
  973. }
  974. }
  975. if (nbytes > 0)
  976. sg = scatterwalk_sg_next(sg);
  977. }
  978. return 0;
  979. }
  980. static int _chain_pm_buffer_in(struct qce_device *pce_dev,
  981. unsigned int pmem, unsigned int nbytes)
  982. {
  983. unsigned int dlen;
  984. struct dmov_desc *pdesc;
  985. pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
  986. dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
  987. if (dlen == 0) {
  988. pdesc->addr = pmem;
  989. pdesc->len = nbytes;
  990. } else if (pmem == (pdesc->addr + dlen)) {
  991. pdesc->len = dlen + nbytes;
  992. } else {
  993. pce_dev->ce_in_src_desc_index++;
  994. if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC)
  995. return -ENOMEM;
  996. pdesc++;
  997. pdesc->len = nbytes;
  998. pdesc->addr = pmem;
  999. }
  1000. return 0;
  1001. }
  1002. static void _chain_buffer_in_init(struct qce_device *pce_dev)
  1003. {
  1004. struct dmov_desc *pdesc;
  1005. pce_dev->ce_in_src_desc_index = 0;
  1006. pce_dev->ce_in_dst_desc_index = 0;
  1007. pdesc = pce_dev->ce_in_src_desc;
  1008. pdesc->len = 0;
  1009. }
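/*
 * Finalize the ce_in command list for a request: mark the last source
 * descriptor, program the device-side destination descriptor with the total
 * length (split when it exceeds QCE_FIFO_SIZE), and place CMD_LC on the last
 * command actually used, either the scatter-gather command alone or the
 * trailing simple command when the digest transfer is also queued.
 */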
  1010. static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total)
  1011. {
  1012. struct dmov_desc *pdesc;
  1013. dmov_sg *pcmd;
  1014. pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index;
  1015. pdesc->len |= ADM_DESC_LAST;
  1016. pdesc = pce_dev->ce_in_dst_desc;
  1017. if (total > QCE_FIFO_SIZE) {
  1018. qce_split_and_insert_dm_desc(pdesc, total, 0,
  1019. &pce_dev->ce_in_dst_desc_index);
  1020. pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index;
  1021. pdesc->len |= ADM_DESC_LAST;
  1022. } else
  1023. pdesc->len = ADM_DESC_LAST | total;
  1024. pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
  1025. if (ncmd == 1)
  1026. pcmd->cmd |= CMD_LC;
  1027. else {
  1028. dmov_s *pscmd;
  1029. pcmd->cmd &= ~CMD_LC;
  1030. pcmd++;
  1031. pscmd = (dmov_s *)pcmd;
  1032. pscmd->cmd |= CMD_LC;
  1033. }
  1034. #ifdef QCE_DEBUG
  1035. dev_info(pce_dev->pdev, "_ce_in_final %d\n",
  1036. pce_dev->ce_in_src_desc_index);
  1037. #endif
  1038. }
  1039. #ifdef QCE_DEBUG
  1040. static void _ce_in_dump(struct qce_device *pce_dev)
  1041. {
  1042. int i;
  1043. struct dmov_desc *pdesc;
  1044. dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
  1045. for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) {
  1046. pdesc = pce_dev->ce_in_src_desc + i;
  1047. dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
  1048. pdesc->len);
  1049. }
  1050. dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
  1051. for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) {
  1052. pdesc = pce_dev->ce_in_dst_desc + i;
  1053. dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
  1054. pdesc->len);
  1055. }
  1056. };
  1057. static void _ce_out_dump(struct qce_device *pce_dev)
  1058. {
  1059. int i;
  1060. struct dmov_desc *pdesc;
  1061. dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
  1062. for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) {
  1063. pdesc = pce_dev->ce_out_src_desc + i;
  1064. dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
  1065. pdesc->len);
  1066. }
  1067. dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
  1068. for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) {
  1069. pdesc = pce_dev->ce_out_dst_desc + i;
  1070. dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr,
  1071. pdesc->len);
  1072. }
  1073. };
  1074. #endif
  1075. static int _chain_sg_buffer_out(struct qce_device *pce_dev,
  1076. struct scatterlist *sg, unsigned int nbytes)
  1077. {
  1078. unsigned int len;
  1079. unsigned int dlen;
  1080. struct dmov_desc *pdesc;
  1081. pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
  1082. /*
  1083. * Two consecutive chunks may be handled by the same
  1084. * buffer descriptor.
  1085. */
  1086. while (nbytes > 0) {
  1087. len = min(nbytes, sg_dma_len(sg));
  1088. dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
  1089. nbytes -= len;
  1090. if (dlen == 0) {
  1091. pdesc->addr = sg_dma_address(sg);
  1092. pdesc->len = len;
  1093. if (pdesc->len > QCE_FIFO_SIZE) {
  1094. if (qce_split_and_insert_dm_desc(pdesc,
  1095. pdesc->len, sg_dma_address(sg),
  1096. &pce_dev->ce_out_dst_desc_index))
  1097. return -EIO;
  1098. }
  1099. } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
  1100. pdesc->len = dlen + len;
  1101. if (pdesc->len > QCE_FIFO_SIZE) {
  1102. if (qce_split_and_insert_dm_desc(pdesc,
  1103. pdesc->len, pdesc->addr,
  1104. &pce_dev->ce_out_dst_desc_index))
  1105. return -EIO;
  1106. }
  1107. } else {
  1108. pce_dev->ce_out_dst_desc_index++;
  1109. if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
  1110. return -EIO;
  1111. pdesc++;
  1112. pdesc->len = len;
  1113. pdesc->addr = sg_dma_address(sg);
  1114. if (pdesc->len > QCE_FIFO_SIZE) {
  1115. if (qce_split_and_insert_dm_desc(pdesc,
  1116. pdesc->len, sg_dma_address(sg),
  1117. &pce_dev->ce_out_dst_desc_index))
  1118. return -EIO;
  1119. }
  1120. }
  1121. if (nbytes > 0)
  1122. sg = scatterwalk_sg_next(sg);
  1123. }
  1124. return 0;
  1125. }
  1126. static int _chain_pm_buffer_out(struct qce_device *pce_dev,
  1127. unsigned int pmem, unsigned int nbytes)
  1128. {
  1129. unsigned int dlen;
  1130. struct dmov_desc *pdesc;
  1131. pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
  1132. dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
  1133. if (dlen == 0) {
  1134. pdesc->addr = pmem;
  1135. pdesc->len = nbytes;
  1136. } else if (pmem == (pdesc->addr + dlen)) {
  1137. pdesc->len = dlen + nbytes;
  1138. } else {
  1139. pce_dev->ce_out_dst_desc_index++;
  1140. if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC)
  1141. return -EIO;
  1142. pdesc++;
  1143. pdesc->len = nbytes;
  1144. pdesc->addr = pmem;
  1145. }
  1146. return 0;
  1147. };
  1148. static void _chain_buffer_out_init(struct qce_device *pce_dev)
  1149. {
  1150. struct dmov_desc *pdesc;
  1151. pce_dev->ce_out_dst_desc_index = 0;
  1152. pce_dev->ce_out_src_desc_index = 0;
  1153. pdesc = pce_dev->ce_out_dst_desc;
  1154. pdesc->len = 0;
  1155. };
  1156. static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total)
  1157. {
  1158. struct dmov_desc *pdesc;
  1159. dmov_sg *pcmd;
  1160. pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index;
  1161. pdesc->len |= ADM_DESC_LAST;
  1162. pdesc = pce_dev->ce_out_src_desc;
  1163. if (total > QCE_FIFO_SIZE) {
  1164. qce_split_and_insert_dm_desc(pdesc, total, 0,
  1165. &pce_dev->ce_out_src_desc_index);
  1166. pdesc = pce_dev->ce_out_src_desc +
  1167. pce_dev->ce_out_src_desc_index;
  1168. pdesc->len |= ADM_DESC_LAST;
  1169. } else
  1170. pdesc->len = ADM_DESC_LAST | total;
  1171. pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
  1172. if (ncmd == 1)
  1173. pcmd->cmd |= CMD_LC;
  1174. else {
  1175. dmov_s *pscmd;
  1176. pcmd->cmd &= ~CMD_LC;
  1177. pcmd++;
  1178. pscmd = (dmov_s *)pcmd;
  1179. pscmd->cmd |= CMD_LC;
  1180. }
  1181. #ifdef QCE_DEBUG
  1182. dev_info(pce_dev->pdev, "_ce_out_final %d\n",
  1183. pce_dev->ce_out_dst_desc_index);
  1184. #endif
  1185. };
  1186. static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
  1187. unsigned int result, struct msm_dmov_errdata *err)
  1188. {
  1189. struct qce_device *pce_dev;
  1190. pce_dev = (struct qce_device *) cmd_ptr->user;
  1191. if (result != ADM_STATUS_OK) {
  1192. dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
  1193. result);
  1194. pce_dev->chan_ce_in_status = -1;
  1195. } else
  1196. pce_dev->chan_ce_in_status = 0;
  1197. pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
  1198. if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
  1199. pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
  1200. pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
  1201. /* done */
  1202. _aead_complete(pce_dev);
  1203. }
  1204. };
  1205. static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
  1206. unsigned int result, struct msm_dmov_errdata *err)
  1207. {
  1208. struct qce_device *pce_dev;
  1209. pce_dev = (struct qce_device *) cmd_ptr->user;
  1210. if (result != ADM_STATUS_OK) {
  1211. dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
  1212. result);
  1213. pce_dev->chan_ce_out_status = -1;
  1214. } else {
  1215. pce_dev->chan_ce_out_status = 0;
  1216. };
  1217. pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
  1218. if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
  1219. pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
  1220. pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
  1221. /* done */
  1222. _aead_complete(pce_dev);
  1223. }
  1224. };
  1225. static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
  1226. unsigned int result, struct msm_dmov_errdata *err)
  1227. {
  1228. struct qce_device *pce_dev;
  1229. pce_dev = (struct qce_device *) cmd_ptr->user;
  1230. if (result != ADM_STATUS_OK) {
  1231. dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
  1232. result);
  1233. pce_dev->chan_ce_in_status = -1;
  1234. } else
  1235. pce_dev->chan_ce_in_status = 0;
  1236. pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
  1237. _sha_complete(pce_dev);
  1238. };
  1239. static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
  1240. unsigned int result, struct msm_dmov_errdata *err)
  1241. {
  1242. struct qce_device *pce_dev;
  1243. pce_dev = (struct qce_device *) cmd_ptr->user;
  1244. if (result != ADM_STATUS_OK) {
  1245. dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
  1246. result);
  1247. pce_dev->chan_ce_in_status = -1;
  1248. } else
  1249. pce_dev->chan_ce_in_status = 0;
  1250. pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
  1251. if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
  1252. pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
  1253. pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
  1254. /* done */
  1255. _ablk_cipher_complete(pce_dev);
  1256. }
  1257. };
  1258. static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
  1259. unsigned int result, struct msm_dmov_errdata *err)
  1260. {
  1261. struct qce_device *pce_dev;
  1262. pce_dev = (struct qce_device *) cmd_ptr->user;
  1263. if (result != ADM_STATUS_OK) {
  1264. dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
  1265. result);
  1266. pce_dev->chan_ce_out_status = -1;
  1267. } else {
  1268. pce_dev->chan_ce_out_status = 0;
  1269. };
  1270. pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
  1271. if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
  1272. pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
  1273. pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;
  1274. /* done */
  1275. _ablk_cipher_complete(pce_dev);
  1276. }
  1277. };
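/*
 * _setup_cmd_template() carves the driver's coherent DMA region (coh_vmem)
 * into the fixed pieces listed below (descriptor arrays, command lists,
 * digest buffer, pad and scratch areas) and pre-builds the ce_in and ce_out
 * ADM command lists. Each request afterwards only fills in descriptor
 * addresses and lengths before the channels are queued.
 */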
static int _setup_cmd_template(struct qce_device *pce_dev)
{
	dmov_sg *pcmd;
	dmov_s *pscmd;
	struct dmov_desc *pdesc;
	unsigned char *vaddr;
	int i = 0;

	/* Divide up the coherent memory (2 * PAGE_SIZE) */

	/* 1. ce_in channel 1st command src descriptors, 128 entries */
	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 2. ce_in channel 1st command dst descriptor, 1 entry */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/*
	 * 3. ce_in channel command list of one scatter gather command
	 * and one simple command.
	 */
	pce_dev->cmd_list_ce_in = vaddr;
	pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 4. authentication result. */
	pce_dev->dig_result = vaddr;
	pce_dev->phy_dig_result = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + SHA256_DIGESTSIZE;

	/*
	 * 5. ce_out channel command list of one scatter gather command
	 * and one simple command.
	 */
	pce_dev->cmd_list_ce_out = vaddr;
	pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg);

	/* 6. ce_out channel command src descriptors, 1 entry */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 7. ce_out channel command dst descriptors, 128 entries. */
	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16);
	pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr;
	pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC);

	/* 8. pad area. */
	pce_dev->ce_pad = vaddr;
	pce_dev->phy_ce_pad = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + ADM_CE_BLOCK_SIZE;

	/* 9. ce_in channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
	pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 10. ce_out channel command pointer list. */
	vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16);
	pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr;
	pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem +
			(vaddr - pce_dev->coh_vmem);
	vaddr = vaddr + sizeof(unsigned char *);

	/* 11. throw away area to store by-pass data from ce_out. */
	pce_dev->ce_out_ignore = (unsigned char *) vaddr;
	pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem
			+ (vaddr - pce_dev->coh_vmem);
	pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr -
			pce_dev->coh_vmem);	/* at least 1.5 K of space */

	/*
	 * The first command of command list ce_in is for the input of
	 * concurrent operation of encrypt/decrypt or for the input
	 * of authentication.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in;
	/* swap byte and half word, dst crci, scatter gather */
	pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS |
			CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG;
	pdesc = pce_dev->ce_in_src_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc;

	pdesc = pce_dev->ce_in_dst_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
			DST_INDEX_SG_CMD(0);

	pcmd++;
	/*
	 * The second command is for the digested data of
	 * hashing operation only. For others, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA256_DIGESTSIZE;	/* to be filled. */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_in));
	pce_dev->chan_ce_in_cmd->user = (void *) pce_dev;
	pce_dev->chan_ce_in_cmd->exec_func = NULL;
	pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_in);

	/*
	 * The first command in the command list ce_out.
	 * It is for encrypt/decrypt output.
	 * If hashing only, ce_out is not used.
	 */
	pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out;
	/* swap byte, half word, source crci, scatter gather */
	pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG;

	pdesc = pce_dev->ce_out_src_desc;
	for (i = 0; i < QCE_MAX_NUM_DESC; i++) {
		pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase);
		pdesc->len = 0;	/* to be filled in each operation */
		pdesc++;
	}
	pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc;

	pdesc = pce_dev->ce_out_dst_desc;
	pdesc->addr = 0;	/* to be filled in each operation */
	pdesc->len = 0;		/* to be filled in each operation */
	pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc;
	pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) |
			DST_INDEX_SG_CMD(0);

	pcmd++;
	/*
	 * The second command is for digested data of esp operation.
	 * For ciphering, this command is not used.
	 */
	pscmd = (dmov_s *) pcmd;
	/* last command, swap byte, half word, src crci, single */
	pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS |
			CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE;
	pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
	pscmd->len = SHA1_DIGESTSIZE;	/* we only support hmac(sha1) */
	pscmd->dst = (unsigned) pce_dev->phy_dig_result;

	/* setup command pointer list */
	*(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST |
			DMOV_CMD_ADDR((unsigned int)
					pce_dev->phy_cmd_list_ce_out));
	pce_dev->chan_ce_out_cmd->user = pce_dev;
	pce_dev->chan_ce_out_cmd->exec_func = NULL;
	pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR(
			(unsigned int) pce_dev->phy_cmd_pointer_list_ce_out);

	return 0;
}
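
/*
 * Kick off the ADM transfer(s) for a request. A channel that is not used
 * by this request is pre-marked COMP so that the completion rendezvous in
 * the callbacks above still fires once the active channel finishes.
 */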
static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out)
{
	if (ce_in)
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;

	if (ce_out)
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG;
	else
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;

	if (ce_in)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_in,
					pce_dev->chan_ce_in_cmd);
	if (ce_out)
		msm_dmov_enqueue_cmd(pce_dev->chan_ce_out,
					pce_dev->chan_ce_out_cmd);

	return 0;
}
static void _f9_complete(struct qce_device *pce_dev)
{
	uint32_t mac_i;
	uint32_t status;

	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, DMA_TO_DEVICE);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status %x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	}

	mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL,
				pce_dev->chan_ce_in_status);
}
static void _f8_complete(struct qce_device *pce_dev)
{
	uint32_t status;

	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				pce_dev->ota_size, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				pce_dev->ota_size, (pce_dev->phy_ota_dst) ?
				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	/* check ce error status */
	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	if (status & (1 << CRYPTO_SW_ERR)) {
		pce_dev->err++;
		dev_err(pce_dev->pdev,
			"Qualcomm Crypto Error at 0x%x, status %x\n",
			pce_dev->phy_iobase, status);
		_init_ce_engine(pce_dev);
		pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO);
		return;
	}

	pce_dev->qce_cb(pce_dev->areq, NULL, NULL,
				pce_dev->chan_ce_in_status |
				pce_dev->chan_ce_out_status);
}
static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	_f9_complete(pce_dev);
}
static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_in_status = -1;
	} else
		pce_dev->chan_ce_in_status = 0;

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_f8_complete(pce_dev);
	}
}
static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr,
		unsigned int result, struct msm_dmov_errdata *err)
{
	struct qce_device *pce_dev;

	pce_dev = (struct qce_device *) cmd_ptr->user;
	if (result != ADM_STATUS_OK) {
		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
				result);
		pce_dev->chan_ce_out_status = -1;
	} else {
		pce_dev->chan_ce_out_status = 0;
	}

	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP;
	if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) {
		pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
		pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

		/* done */
		_f8_complete(pce_dev);
	}
}
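
/*
 * Program the crypto engine for an OTA F9 (integrity) operation: the
 * integrity key goes into the AUTH_IV registers, FRESH and COUNT-I into
 * the byte-count registers, and the segment config selects UIA1 (Kasumi)
 * or UIA2 (SNOW 3G) plus the message direction before GO is issued.
 */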
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req)
{
	uint32_t cfg;
	uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)];

	_byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE);
	writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG);
	writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG);
	writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG);
	writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG);
	writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG);

	writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	writel_relaxed(req->count_i, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT1_REG);

	/* write auth_seg_cfg */
	writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE,
			pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write seg_cfg */
	cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);
	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE);
	else
		cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE);
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F9_DIRECTION;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Barrier to ensure the previous register writes
	 * (including GO) to the CE finish before the DMA transfer
	 * request is issued.
	 */
	mb();
	return 0;
}
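
/*
 * Program the crypto engine for an OTA F8 (ciphering) operation: the
 * cipher key is loaded through the DES key registers, COUNT-C goes into
 * CNTR0_IV0, and bearer/packet count into CNTR1_IV1. In key stream mode
 * the data length must be a multiple of 16 bytes and only one packet is
 * allowed, which the initial sanity check enforces.
 */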
static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
		uint16_t cipher_size)
{
	uint32_t cfg;
	uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)];

	if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) ||
			(req->bearer >= QCE_OTA_MAX_BEARER))
		return -EINVAL;

	/* write seg_cfg */
	cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) |
			(1 << CRYPTO_LAST);
	if (req->algorithm == QCE_OTA_ALGO_KASUMI)
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ);
	else
		cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ);
	if (key_stream_mode)
		cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= 1 << CRYPTO_F8_DIRECTION;
	writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG);

	/* write seg_size */
	writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* write 0 to auth_size, auth_offset */
	writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write encr_seg_cfg seg_size, seg_offset */
	writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) |
			(cipher_offset & 0xffff),
			pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write keys */
	_byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE);
	writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG);
	writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG);
	writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG);
	writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG);

	/* write cntr0_iv0 for countC */
	writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG);

	/* write cntr1_iv1 for nPkts, and bearer */
	if (npkts == 1)
		npkts = 0;
	writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
			npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* issue go to crypto */
	writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Barrier to ensure the previous register writes
	 * (including GO) to the CE finish before the DMA transfer
	 * request is issued.
	 */
	mb();
	return 0;
}

struct qce_pm_table qce_pm_table = {NULL, NULL};
EXPORT_SYMBOL(qce_pm_table);
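
/*
 * AEAD request entry point. The ce_in chain is built as
 * assoc data | cipher iv | payload | pad, while the ce_out chain routes
 * the assoc data and iv into the "ignore" area so only the transformed
 * payload reaches the caller's destination; the digest is copied out by
 * the second ce_out command.
 */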
int qce_aead_req(void *handle, struct qce_req *q_req)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct aead_request *areq = (struct aead_request *) q_req->areq;
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	uint32_t ivsize = crypto_aead_ivsize(aead);
	uint32_t totallen;
	uint32_t pad_len;
	uint32_t authsize = crypto_aead_authsize(aead);
	int rc = 0;

	q_req->ivsize = ivsize;
	if (q_req->dir == QCE_ENCRYPT)
		q_req->cryptlen = areq->cryptlen;
	else
		q_req->cryptlen = areq->cryptlen - authsize;

	if ((q_req->cryptlen > UINT_MAX - ivsize) ||
		(q_req->cryptlen + ivsize > UINT_MAX - areq->assoclen)) {
		pr_err("Integer overflow on total aead req length.\n");
		return -EINVAL;
	}

	totallen = q_req->cryptlen + ivsize + areq->assoclen;
	pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->assoc_nents = 0;
	pce_dev->phy_iv_in = 0;
	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
	qce_dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
					DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher iv for input */
	pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
			ivsize, DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* for output, ignore associated data and cipher iv */
	if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore,
				ivsize + areq->assoclen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen);
	qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen);
		qce_dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE));
	_ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE));

	/* set up crypto device */
	rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = q_req->areq;
	pce_dev->qce_cb = q_req->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->assoc_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->assoc,
				pce_dev->assoc_nents, DMA_TO_DEVICE);
	}
	if (pce_dev->phy_iv_in) {
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
				ivsize, DMA_TO_DEVICE);
	}
	if (pce_dev->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
								DMA_TO_DEVICE);
	}
	if (pce_dev->dst_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
				DMA_FROM_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_aead_req);
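
/*
 * ablkcipher request entry point: chain the source into ce_in and the
 * destination into ce_out, pad both chains up to the ADM block size,
 * program the engine, and hand both command lists to the data mover.
 */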
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)
						c_req->areq;
	uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE)
						- areq->nbytes;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	pce_dev->src_nents = 0;
	pce_dev->dst_nents = 0;

	/* cipher input */
	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
	qce_dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* cipher output */
	if (areq->src != areq->dst) {
		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
		qce_dma_map_sg(pce_dev->pdev, areq->dst,
				pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, areq->nbytes + pad_len);
	_ce_out_final(pce_dev, 1, areq->nbytes + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
	_ce_out_dump(pce_dev);
#endif

	/* set up crypto device */
	rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = areq;
	pce_dev->qce_cb = c_req->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func =
					_ablk_cipher_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func =
					_ablk_cipher_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->dst_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
				pce_dev->dst_nents, DMA_FROM_DEVICE);
	}
	if (pce_dev->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
				pce_dev->src_nents,
				(areq->src == areq->dst) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);
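
/*
 * Hash request entry point: only the ce_in channel is used; the digest is
 * returned through the second (simple) command of the ce_in list, which
 * copies the AUTH_IV registers into the coherent dig_result buffer.
 */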
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size;
	struct ahash_request *areq = (struct ahash_request *)sreq->areq;

	_chain_buffer_in_init(pce_dev);
	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
	qce_dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
							DMA_TO_DEVICE);

	if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, 2, sreq->size + pad_len);

#ifdef QCE_DEBUG
	_ce_in_dump(pce_dev);
#endif

	rc = _sha_ce_setup(pce_dev, sreq);
	if (rc < 0)
		goto bad;

	pce_dev->areq = areq;
	pce_dev->qce_cb = sreq->qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
				pce_dev->src_nents, DMA_TO_DEVICE);
	}
	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);

int qce_enable_clk(void *handle)
{
	return 0;
}
EXPORT_SYMBOL(qce_enable_clk);

int qce_disable_clk(void *handle)
{
	return 0;
}
EXPORT_SYMBOL(qce_disable_clk);

/*
 * crypto engine open function.
 */
void *qce_open(struct platform_device *pdev, int *rc)
{
	struct qce_device *pce_dev;
	struct resource *resource;
	struct clk *ce_clk;

	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
	if (!pce_dev) {
		*rc = -ENOMEM;
		dev_err(&pdev->dev, "Can not allocate memory\n");
		return NULL;
	}
	pce_dev->pdev = &pdev->dev;

	ce_clk = clk_get(pce_dev->pdev, "core_clk");
	if (IS_ERR(ce_clk)) {
		kfree(pce_dev);
		*rc = PTR_ERR(ce_clk);
		return NULL;
	}
	pce_dev->ce_clk = ce_clk;

	*rc = clk_enable(pce_dev->ce_clk);
	if (*rc) {
		kfree(pce_dev);
		return NULL;
	}

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing MEM resource\n");
		goto err;
	}
	pce_dev->phy_iobase = resource->start;
	pce_dev->iobase = ioremap_nocache(resource->start,
				resource->end - resource->start + 1);
	if (!pce_dev->iobase) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Can not map io memory\n");
		goto err;
	}

	pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd),
			GFP_KERNEL);
	if (pce_dev->chan_ce_in_cmd == NULL ||
			pce_dev->chan_ce_out_cmd == NULL) {
		dev_err(pce_dev->pdev, "Can not allocate memory\n");
		*rc = -ENOMEM;
		goto err;
	}

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_channels");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA channel resource\n");
		goto err;
	}
	pce_dev->chan_ce_in = resource->start;
	pce_dev->chan_ce_out = resource->end;

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_in");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci in resource\n");
		goto err;
	}
	pce_dev->crci_in = resource->start;

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_out");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci out resource\n");
		goto err;
	}
	pce_dev->crci_out = resource->start;

	resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
					"crypto_crci_hash");
	if (!resource) {
		*rc = -ENXIO;
		dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n");
		goto err;
	}
	pce_dev->crci_hash = resource->start;

	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
			2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL);
	if (pce_dev->coh_vmem == NULL) {
		*rc = -ENOMEM;
		dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n");
		goto err;
	}

	_setup_cmd_template(pce_dev);

	pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE;
	pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE;

	if (_init_ce_engine(pce_dev)) {
		*rc = -ENXIO;
		clk_disable(pce_dev->ce_clk);
		goto err;
	}
	*rc = 0;
	clk_disable(pce_dev->ce_clk);

	pce_dev->err = 0;
	return pce_dev;
err:
	if (pce_dev)
		qce_close(pce_dev);
	return NULL;
}
EXPORT_SYMBOL(qce_open);

/*
 * crypto engine close function.
 */
int qce_close(void *handle)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (handle == NULL)
		return -ENODEV;
	if (pce_dev->iobase)
		iounmap(pce_dev->iobase);

	if (pce_dev->coh_vmem)
		dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE,
				pce_dev->coh_vmem, pce_dev->coh_pmem);
	kfree(pce_dev->chan_ce_in_cmd);
	kfree(pce_dev->chan_ce_out_cmd);

	clk_put(pce_dev->ce_clk);
	kfree(handle);
	return 0;
}
EXPORT_SYMBOL(qce_close);
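
/*
 * Report the capabilities of this ADM-based engine: only sha1_hmac_20
 * (when the hardware supports HMAC), 192-bit AES keys and, when present,
 * the OTA f8/f9 algorithms are advertised.
 */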
int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;

	if (ce_support == NULL)
		return -EINVAL;

	if (pce_dev->hmac == 1)
		ce_support->sha1_hmac_20 = true;
	else
		ce_support->sha1_hmac_20 = false;
	ce_support->sha1_hmac = false;
	ce_support->sha256_hmac = false;
	ce_support->sha_hmac = false;
	ce_support->cmac = false;
	ce_support->aes_key_192 = true;
	ce_support->aes_xts = false;
	ce_support->aes_ccm = false;
	ce_support->ota = pce_dev->ota;
	ce_support->aligned_only = false;
	ce_support->is_shared = false;
	ce_support->bam = false;
	return 0;
}
EXPORT_SYMBOL(qce_hw_support);
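
/*
 * Single-packet OTA F8 request. When data_in is NULL the request runs in
 * key stream mode: no ce_in transfer is set up and the engine generates
 * the key stream directly into the output chain.
 */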
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) -
						req->data_len;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	key_stream_mode = (req->data_in == NULL);

	/* F8 cipher input */
	if (key_stream_mode)
		pce_dev->phy_ota_src = 0;
	else {
		pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				req->data_len) < 0) {
			pce_dev->phy_ota_dst = 0;
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->data_len;

	/* pad data */
	if (pad_len) {
		if (!key_stream_mode && _chain_pm_buffer_in(pce_dev,
					pce_dev->phy_ce_pad, pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
						pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	if (!key_stream_mode)
		_ce_in_final(pce_dev, 1, req->data_len + pad_len);
	_ce_out_final(pce_dev, 1, req->data_len + pad_len);

	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;
	if (!key_stream_mode)
		pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, !(key_stream_mode), true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (pce_dev->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);
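
/*
 * Multi-packet OTA F8 request: num_pkt packets of data_len bytes each are
 * mapped as one contiguous buffer, and the per-packet cipher offset/size
 * from the request is handed to _ce_f8_setup along with the packet count.
 */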
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	uint16_t num_pkt = mreq->num_pkt;
	uint16_t cipher_start = mreq->cipher_start;
	uint16_t cipher_size = mreq->cipher_size;
	struct qce_f8_req *req = &mreq->qce_f8_req;
	uint32_t total;
	uint32_t pad_len;
	dma_addr_t dst = 0;
	int rc = 0;

	total = num_pkt * req->data_len;
	pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total;

	_chain_buffer_in_init(pce_dev);
	_chain_buffer_out_init(pce_dev);

	/* F8 cipher input */
	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev,
				req->data_in, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src,
				total) < 0) {
		pce_dev->phy_ota_dst = 0;
		rc = -ENOMEM;
		goto bad;
	}

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
						DMA_FROM_DEVICE);
		pce_dev->phy_ota_dst = dst;
	} else {
		dst = pce_dev->phy_ota_src;
		pce_dev->phy_ota_dst = 0;
	}
	if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = total;

	/* pad data */
	if (pad_len) {
		if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
		if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad,
					pad_len) < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}

	/* finalize the ce_in and ce_out channels command lists */
	_ce_in_final(pce_dev, 1, total + pad_len);
	_ce_out_final(pce_dev, 1, total + pad_len);

	/* set up crypto device */
	rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
			cipher_size);
	if (rc)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back;
	pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back;

	rc = _qce_start_dma(pce_dev, true, true);
	if (rc == 0)
		return 0;
bad:
	if (pce_dev->phy_ota_dst)
		dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total,
				DMA_FROM_DEVICE);
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f8_multi_pkt_req);
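
/*
 * OTA F9 request: the message is chained into ce_in only; the resulting
 * MAC-I is read back from CRYPTO_AUTH_IV0_REG in _f9_complete() and passed
 * to the callback.
 */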
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize;

	pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
			req->msize, DMA_TO_DEVICE);

	_chain_buffer_in_init(pce_dev);
	rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize);
	if (rc < 0) {
		rc = -ENOMEM;
		goto bad;
	}

	pce_dev->ota_size = req->msize;
	if (pad_len) {
		rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad,
				pad_len);
		if (rc < 0) {
			rc = -ENOMEM;
			goto bad;
		}
	}
	_ce_in_final(pce_dev, 2, req->msize + pad_len);

	rc = _ce_f9_setup(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to adm */
	pce_dev->areq = cookie;
	pce_dev->qce_cb = qce_cb;
	pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back;

	rc = _qce_start_dma(pce_dev, true, false);
	if (rc == 0)
		return 0;
bad:
	dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src,
				req->msize, DMA_TO_DEVICE);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Crypto Engine driver");