qat_uclo.c

/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)
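
/*
 * Loader for QAT accelerator engine (AE) firmware.  Two container
 * formats are handled: UOF, unsigned microcode that is parsed here and
 * written into the AE control stores through the qat_hal_* helpers,
 * and SUOF, signed images that are staged in DMA-coherent memory and
 * then authenticated and loaded by the FW Control Unit (FCU).
 */
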
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
	if (!ae_slice->region)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->region);
	ae_slice->region = NULL;
	return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
	unsigned int i;

	if (!ae_data) {
		pr_err("QAT: bad argument, ae_data is NULL\n");
		return -EINVAL;
	}

	for (i = 0; i < ae_data->slice_num; i++) {
		kfree(ae_data->ae_slices[i].region);
		ae_data->ae_slices[i].region = NULL;
		kfree(ae_data->ae_slices[i].page);
		ae_data->ae_slices[i].page = NULL;
	}
	return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
				 unsigned int str_offset)
{
	if ((!str_table->table_len) || (str_offset > str_table->table_len))
		return NULL;
	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}

static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
		return -EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
	int maj = suof_hdr->maj_ver & 0xff;
	int min = suof_hdr->min_ver & 0xff;

	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
		return -EINVAL;
	}
	if (suof_hdr->fw_type != 0) {
		pr_err("QAT: unsupported firmware type\n");
		return -EINVAL;
	}
	if (suof_hdr->num_chunks <= 0x1) {
		pr_err("QAT: SUOF chunk count is incorrect\n");
		return -EINVAL;
	}
	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
		       maj, min);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae,
				   struct icp_qat_uof_batch_init
				   *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
				 struct icp_qat_uof_batch_init **base)
{
	struct icp_qat_uof_batch_init *umem_init;

	umem_init = *base;
	while (umem_init) {
		struct icp_qat_uof_batch_init *pre;

		pre = umem_init;
		umem_init = umem_init->next;
		kfree(pre);
	}
	*base = NULL;
}

static int qat_uclo_parse_num(char *str, unsigned int *num)
{
	char buf[16] = {0};
	unsigned long ae = 0;
	int i;

	strncpy(buf, str, 15);
	for (i = 0; i < 16; i++) {
		if (!isdigit(buf[i])) {
			buf[i] = '\0';
			break;
		}
	}
	if ((kstrtoul(buf, 10, &ae)))
		return -EFAULT;
	*num = (unsigned int)ae;
	return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_initmem *init_mem,
				     unsigned int size_range, unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("QAT: initmem is out of range\n");
		return -EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("QAT: Memory scope for init_mem error\n");
		return -EINVAL;
	}
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("QAT: Parse num for AE number failed\n");
		return -EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("QAT: ae %d out of range\n", *ae);
		return -EINVAL;
	}
	return 0;
}

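/*
 * Append one batch-init node per value attribute of init_mem to the
 * list at *init_tab_base, allocating the list head on first use.  The
 * head node's size field starts at 1 and grows by qat_hal_get_ins_num()
 * for every value queued; on allocation failure only the nodes added
 * here (and the head, if we created it) are freed.
 */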
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
					   *handle, struct icp_qat_uof_initmem
					   *init_mem, unsigned int ae,
					   struct icp_qat_uof_batch_init
					   **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i, flag = 0;

	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
		if (!init_header)
			return -ENOMEM;
		init_header->size = 1;
		*init_tab_base = init_header;
		flag = 1;
	}
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
		if (!mem_init)
			goto out_err;
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
out_err:
	/* Do not free the list head unless we allocated it. */
	tail_old = tail_old->next;
	if (flag) {
		kfree(*init_tab_base);
		*init_tab_base = NULL;
	}
	while (tail_old) {
		mem_init = tail_old->next;
		kfree(tail_old);
		tail_old = mem_init;
	}
	return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
		if (obj_handle->ae_data[ae].ae_slices[i].
		    encap_image->uwords_num < uaddr)
			obj_handle->ae_data[ae].ae_slices[i].
			encap_image->uwords_num = uaddr;
	}
	return 0;
}

static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	switch (init_mem->region) {
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(uint64_t));
	page = image->page;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;
		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		patt_pos = page->beg_addr_p + page->micro_words_num;

		/* fill the gap before the page, then everything after it */
		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
			(uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: failed to batch init lmem for AE %d\n",
			       ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return 0;
}

static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
				 char *chunk_id, void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
		(struct icp_qat_uof_chunkhdr *)
		((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}

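/*
 * One step of the 16-bit CRC used for chunk checksums.  The polynomial
 * 0x1021 and the 0xffff result mask match CRC-16/CCITT; the incoming
 * byte is mixed into the top of the 16-bit register before the eight
 * shift/xor rounds.
 */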
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xFFFF;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}

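/*
 * Walk the file-level chunk headers that follow the file header, find
 * the chunk whose ID matches chunk_id, verify its checksum and wrap it
 * in a freshly allocated icp_qat_uclo_objhdr.  Returns NULL on a
 * missing chunk, a checksum mismatch or an allocation failure.
 */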
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}

static unsigned int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
			    struct icp_qat_uof_image *image)
{
	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
	struct icp_qat_uof_objtable *neigh_reg_tab;
	struct icp_qat_uof_code_page *code_page;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)image + sizeof(struct icp_qat_uof_image));
	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		     code_page->uc_var_tab_offset);
	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		      code_page->imp_var_tab_offset);
	imp_expr_tab = (struct icp_qat_uof_objtable *)
		       (encap_uof_obj->beg_uof +
		       code_page->imp_expr_tab_offset);
	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
	    imp_expr_tab->entry_num) {
		pr_err("QAT: UOF can't contain imported variables\n");
		return -EINVAL;
	}
	neigh_reg_tab = (struct icp_qat_uof_objtable *)
			(encap_uof_obj->beg_uof +
			code_page->neigh_reg_tab_offset);
	if (neigh_reg_tab->entry_num) {
		pr_err("QAT: UOF can't contain neighbour registers\n");
		return -EINVAL;
	}
	if (image->numpages > 1) {
		pr_err("QAT: UOF can't contain multiple pages\n");
		return -EINVAL;
	}
	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use the shared control store feature\n");
		return -EFAULT;
	}
	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
		pr_err("QAT: UOF can't use the reloadable feature\n");
		return -EFAULT;
	}
	return 0;
}

static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
				     *encap_uof_obj,
				     struct icp_qat_uof_image *img,
				     struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
					&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}

static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae < max_ae; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		for (i = 0; i < obj_handle->uimage_num; i++) {
			if (!test_bit(ae, (unsigned long *)
			    &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: no uimage is assigned to any enabled AE\n");
		return -EINVAL;
	}
	return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (uintptr_t)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}

static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
	switch (handle->pci_dev->device) {
	case ADF_DH895XCC_PCI_DEVICE_ID:
		return ICP_QAT_AC_895XCC_DEV_TYPE;
	case ADF_C62X_PCI_DEVICE_ID:
		return ICP_QAT_AC_C62X_DEV_TYPE;
	case ADF_C3XXX_PCI_DEVICE_ID:
		return ICP_QAT_AC_C3XXX_DEV_TYPE;
	default:
		pr_err("QAT: unsupported device 0x%x\n",
		       handle->pci_dev->device);
		return 0;
	}
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
		pr_err("QAT: UOF type 0x%x doesn't match platform 0x%x\n",
		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
		       prod_type);
		return -EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
	    (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
		pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

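/*
 * For the absolute (ABS) register types the context mask is forced to
 * zero and control deliberately falls through to the matching relative
 * (REL) cases, so one helper call serves both addressing modes.
 */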
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		/* fall through */
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int s, ae;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("QAT: memory initialization failed\n");
			return -EINVAL;
		}
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae,
						  obj_handle->ae_data[ae].
						  ae_slices[s].encap_image))
				return -EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae, nn_mode, s;
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		ae_data = &obj_handle->ae_data[ae];
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			if (qat_hal_set_ae_ctx_mode(handle, ae,
						    (char)ICP_QAT_CTX_MODE
						    (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
				return -EFAULT;
			}
			nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
			if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
				pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
						   (char)ICP_QAT_LOC_MEM0_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
				return -EFAULT;
			}
			if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
						   (char)ICP_QAT_LOC_MEM1_MODE
						   (uof_image->ae_mode))) {
				pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
				return -EFAULT;
			}
		}
	}
	return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uclo_encapme *image;
	int a;

	for (a = 0; a < obj_handle->uimage_num; a++) {
		image = &obj_handle->ae_uimage[a];
		image->uwords_num = image->page->beg_addr_p +
				    image->page->micro_words_num;
	}
}

static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("QAT: UOF incompatible\n");
		return -EINVAL;
	}
	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("QAT: UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("QAT: Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}

static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_filehdr *suof_ptr,
				      int suof_size)
{
	unsigned int check_sum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

	suof_handle->file_id = ICP_QAT_SUOF_FID;
	suof_handle->suof_buf = (char *)suof_ptr;
	suof_handle->suof_size = suof_size;
	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
					      min_ver);
	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
					       min_ver_offset);
	if (check_sum != suof_ptr->check_sum) {
		pr_err("QAT: incorrect SUOF checksum\n");
		return -EINVAL;
	}
	suof_handle->check_sum = suof_ptr->check_sum;
	suof_handle->min_ver = suof_ptr->min_ver;
	suof_handle->maj_ver = suof_ptr->maj_ver;
	suof_handle->fw_type = suof_ptr->fw_type;
	return 0;
}

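/*
 * A signed image (SIMG) chunk is laid out as a CSS header, the FW
 * signing key (modulus plus exponent), the signature, and finally the
 * image body, which begins with an icp_qat_simg_ae_mode block carrying
 * the AE mask, name and metadata picked out below.
 */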
static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
			      struct icp_qat_suof_img_hdr *suof_img_hdr,
			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	struct icp_qat_simg_ae_mode *ae_mode;
	struct icp_qat_suof_objhdr *suof_objhdr;

	suof_img_hdr->simg_buf = (suof_handle->suof_buf +
				  suof_chunk_hdr->offset +
				  sizeof(*suof_objhdr));
	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
				  (suof_handle->suof_buf +
				   suof_chunk_hdr->offset))->img_length;

	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
	suof_img_hdr->css_key = (suof_img_hdr->css_header +
				 sizeof(struct icp_qat_css_hdr));
	suof_img_hdr->css_signature = suof_img_hdr->css_key +
				      ICP_QAT_CSS_FWSK_MODULUS_LEN +
				      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
	suof_img_hdr->css_simg = suof_img_hdr->css_signature +
				 ICP_QAT_CSS_SIGNATURE_LEN;

	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
	suof_img_hdr->ae_mask = ae_mode->ae_mask;
	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
	suof_img_hdr->fw_type = ae_mode->fw_type;
}

static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	char **sym_str = (char **)&suof_handle->sym_str;
	unsigned int *sym_size = &suof_handle->sym_size;
	struct icp_qat_suof_strtable *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)
		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
	*sym_str = (char *)(uintptr_t)
		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
		   sizeof(str_table_obj->tab_length));
}

static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_img_hdr *img_hdr)
{
	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
	unsigned int prod_rev, maj_ver, prod_type;

	prod_type = qat_uclo_get_dev_type(handle);
	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
	prod_rev = PID_MAJOR_REV |
			 (PID_MINOR_REV & handle->hal_handle->revision_id);
	if (img_ae_mode->dev_type != prod_type) {
		pr_err("QAT: incompatible product type %x\n",
		       img_ae_mode->dev_type);
		return -EINVAL;
	}
	maj_ver = prod_rev & 0xff;
	if ((maj_ver > img_ae_mode->devmax_ver) ||
	    (maj_ver < img_ae_mode->devmin_ver)) {
		pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;

	kfree(sobj_handle->img_table.simg_hdr);
	sobj_handle->img_table.simg_hdr = NULL;
	kfree(handle->sobj_handle);
	handle->sobj_handle = NULL;
}

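/*
 * Swap the image at img_id with the last table entry, so that the image
 * targeting AE0 (looked up by qat_uclo_map_suof() before calling here)
 * is always the last one written by qat_uclo_wr_suof_img().
 */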
static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
			      unsigned int img_id, unsigned int num_simgs)
{
	struct icp_qat_suof_img_hdr img_header;

	if (img_id != num_simgs - 1) {
		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[img_id], &img_header,
		       sizeof(*suof_img_hdr));
	}
}

static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_suof_filehdr *suof_ptr,
			     int suof_size)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
	unsigned int i = 0;
	struct icp_qat_suof_img_hdr img_header;

	if (!suof_ptr || (suof_size == 0)) {
		pr_err("QAT: invalid SUOF pointer or size\n");
		return -EINVAL;
	}
	if (qat_uclo_check_suof_format(suof_ptr))
		return -EINVAL;
	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
	if (ret)
		return ret;
	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));

	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;

	if (suof_handle->img_table.num_simgs != 0) {
		suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs *
				       sizeof(img_header), GFP_KERNEL);
		if (!suof_img_hdr)
			return -ENOMEM;
		suof_handle->img_table.simg_hdr = suof_img_hdr;
	}

	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
		qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
				  &suof_chunk_hdr[1 + i]);
		ret = qat_uclo_check_simg_compat(handle,
						 &suof_img_hdr[i]);
		if (ret)
			return ret;
		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
			ae0_img = i;
	}
	qat_uclo_tail_img(suof_img_hdr, ae0_img,
			  suof_handle->img_table.num_simgs);
	return 0;
}

#define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
#define BITS_IN_DWORD 32

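/*
 * Hand a mapped image to the FW Control Unit for authentication: the
 * CSS header bus address (minus the auth-chunk prefix) is written to
 * the FCU DRAM address CSRs, the AUTH command is issued, and FCU_STATUS
 * is polled until the hardware reports VERI_DONE with the auth-fw-loaded
 * bit set, reports VERI_FAIL, or the retry budget runs out.
 */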
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int fcu_sts, retry = 0;
	u64 bus_addr;

	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
	SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
	SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);

	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}

static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc,
			       unsigned int size)
{
	void *vptr;
	dma_addr_t ptr;

	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
				  size, &ptr, GFP_KERNEL);
	if (!vptr)
		return -ENOMEM;
	dram_desc->dram_base_addr_v = vptr;
	dram_desc->dram_bus_addr = ptr;
	dram_desc->dram_size = size;
	return 0;
}

static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc)
{
	dma_free_coherent(&handle->pci_dev->dev,
			  (size_t)(dram_desc->dram_size),
			  (dram_desc->dram_base_addr_v),
			  dram_desc->dram_bus_addr);
	memset(dram_desc, 0, sizeof(*dram_desc));
}

static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_fw_auth_desc **desc)
{
	struct icp_firml_dram_desc dram_desc;

	dram_desc.dram_base_addr_v = *desc;
	dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
				   (*desc))->chunk_bus_addr;
	dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
			       (*desc))->chunk_size;
	qat_uclo_simg_free(handle, &dram_desc);
}

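/*
 * Build the in-DRAM layout the FCU expects: an icp_qat_auth_chunk
 * bookkeeping header, then the CSS header, the padded FWSK public key
 * (modulus, pad, exponent), the signature, and the image body.  The
 * auth descriptor at the start of the allocation is filled with the
 * bus addresses of each of these pieces.
 */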
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_fw_auth_desc *auth_desc;
	struct icp_qat_auth_chunk *auth_chunk;
	u64 virt_addr, bus_addr, virt_base;
	unsigned int length, simg_offset = sizeof(*auth_chunk);
	struct icp_firml_dram_desc img_desc;

	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
		pr_err("QAT: error, input image size overflow %d\n", size);
		return -EINVAL;
	}
	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
		 ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
		 size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
	if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
		pr_err("QAT: error, failed to allocate contiguous DRAM\n");
		return -ENOMEM;
	}

	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
	virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
	bus_addr  = img_desc.dram_bus_addr + simg_offset;
	auth_desc = img_desc.dram_base_addr_v;
	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->css_hdr_low = (unsigned int)bus_addr;
	virt_addr = virt_base;

	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
			   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);
	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr)),
	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN);

	/* exponent */
	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_PAD_LEN),
	       (void *)(image + sizeof(*css_hdr) +
			ICP_QAT_CSS_FWSK_MODULUS_LEN),
	       sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN;
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->signature_low = (unsigned int)bus_addr;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN),
	       ICP_QAT_CSS_SIGNATURE_LEN);

	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN;
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
	auth_desc->img_low = (unsigned int)bus_addr;
	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
	memcpy((void *)(uintptr_t)virt_addr,
	       (void *)(image + ICP_QAT_AE_IMG_OFFSET),
	       auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
	    CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = (unsigned int)
						 (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = (unsigned int)
					     (bus_addr >> BITS_IN_DWORD);
		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
	} else {
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned int i;
	unsigned int fcu_sts;
	struct icp_qat_simg_ae_mode *virt_addr;
	unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;

	virt_addr = (void *)((uintptr_t)desc +
		     sizeof(struct icp_qat_auth_chunk) +
		     sizeof(struct icp_qat_css_hdr) +
		     ICP_QAT_CSS_FWSK_PUB_LEN +
		     ICP_QAT_CSS_SIGNATURE_LEN);
	for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
		int retry = 0;

		if (!((virt_addr->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("QAT: AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, FCU_CONTROL,
			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
			if (((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) &&
			    ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
				break;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("QAT: firmware load failed, timed out after %d retries\n",
			       retry);
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
				 void *addr_ptr, int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
	if (!suof_handle)
		return -ENOMEM;
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("QAT: map SUOF failed\n");
		return -EINVAL;
	}
	return 0;
}

int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int status = 0;

	if (handle->fw_auth) {
		/* only unmap a descriptor that was successfully mapped */
		status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size,
					      &desc);
		if (!status) {
			status = qat_uclo_auth_fw(handle, desc);
			qat_uclo_ummap_auth_fw(handle, &desc);
		}
	} else {
		if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
			return -EINVAL;
		}
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
	}
	return status;
}

static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("QAT: object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}

int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		     void *addr_ptr, int mem_size)
{
	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return -EINVAL;

	return (handle->fw_auth) ?
			qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
			qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
}

void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;

	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	if (!obj_handle)
		return;
	kfree(obj_handle->uword_buf);
	for (a = 0; a < obj_handle->uimage_num; a++)
		kfree(obj_handle->ae_uimage[a].page);
	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
	kfree(obj_handle->obj_hdr);
	kfree(obj_handle->obj_buf);
	kfree(obj_handle);
	handle->obj_handle = NULL;
}

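/*
 * Resolve one microword for physical ustore address addr_p: find the
 * uword block covering the relative address, copy the 6-byte word out
 * and mask off the unused bits.  A word that decodes to INVLD_UWORD is
 * replaced with the image's fill pattern, as is everything on a page
 * that has no encap_page at all.
 */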
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 uint64_t *uword, unsigned int addr_p,
				 unsigned int raddr, uint64_t fill)
{
	uint64_t uwrd = 0;
	unsigned int i;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (raddr >= encap_page->uwblock[i].start_addr &&
		    raddr <= encap_page->uwblock[i].start_addr +
		    encap_page->uwblock[i].words_num - 1) {
			raddr -= encap_page->uwblock[i].start_addr;
			raddr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + raddr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & 0xbffffffffffull;
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}

static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t fill_pat;

	/* load the page starting at appropriate ustore address */
	/* get fill-pattern from an image -- they are all the same */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(uint64_t));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		if (words_num < UWORD_CPYBUF_SIZE)
			cpylen = words_num;
		else
			cpylen = UWORD_CPYBUF_SIZE;

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle, encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i, fill_pat);

		/* copy the buffer to ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae,
				  uw_physical_addr, cpylen,
				  obj_handle->uword_buf);

		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}

static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/*
	 * load the default page and set assigned CTX PC
	 * to the entrypoint address
	 */
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
			continue;
		/* find the slice to which this image is assigned */
		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
			if (image->ctx_assigned & obj_handle->ae_data[ae].
			    ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= obj_handle->ae_data[ae].slice_num)
			continue;
		page = obj_handle->ae_data[ae].ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		page = obj_handle->ae_data[ae].ae_slices[s].page;
		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
					(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}

static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		/* nothing is mapped yet, so a map failure needs no unmap */
		if (qat_uclo_map_auth_fw(handle,
					 (char *)simg_hdr[i].simg_buf,
					 (unsigned int)
					 (simg_hdr[i].simg_len),
					 &desc))
			return -EINVAL;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_load_fw(handle, desc))
			goto wr_err;
		qat_uclo_ummap_auth_fw(handle, &desc);
	}
	return 0;
wr_err:
	qat_uclo_ummap_auth_fw(handle, &desc);
	return -EINVAL;
}

static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return -EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return -EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return -EINVAL;
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}

int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
				   qat_uclo_wr_uof_img(handle);
}
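
/*
 * Typical call sequence, as a sketch (the exact caller lives in the
 * ADF accel-engine code and may differ in detail):
 *
 *	if (qat_uclo_map_obj(handle, fw_addr, fw_size))
 *		return -EFAULT;		parse UOF or map SUOF
 *	if (qat_uclo_wr_all_uimage(handle))
 *		return -EFAULT;		write or authenticate+load images
 *	qat_uclo_del_uof_obj(handle);	release loader state
 *
 * qat_uclo_map_obj() dispatches on handle->fw_auth, so the same three
 * calls cover both the signed (SUOF/FCU) and unsigned (UOF) paths.
 */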