ipa_hdr.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693
  1. /* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include "ipa_i.h"
/* size in bytes of each header bin; a header is placed in the smallest bin
 * that can hold it (see __ipa_add_hdr bin selection)
 */
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36 };
  14. /**
  15. * ipa_generate_hdr_hw_tbl() - generates the headers table
  16. * @mem: [out] buffer to put the header table
  17. *
  18. * Returns: 0 on success, negative on failure
  19. */
  20. int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
  21. {
  22. struct ipa_hdr_entry *entry;
  23. mem->size = ipa_ctx->hdr_tbl.end;
  24. if (mem->size == 0) {
  25. IPAERR("hdr tbl empty\n");
  26. return -EPERM;
  27. }
  28. IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
  29. mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
  30. GFP_KERNEL);
  31. if (!mem->base) {
  32. IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
  33. return -ENOMEM;
  34. }
  35. memset(mem->base, 0, mem->size);
  36. list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
  37. link) {
  38. IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
  39. entry->offset_entry->offset);
  40. memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
  41. entry->hdr_len);
  42. }
  43. return 0;
  44. }
  45. /*
  46. * __ipa_commit_hdr() commits hdr to hardware
  47. * This function needs to be called with a locked mutex.
  48. */
  49. static int __ipa_commit_hdr(void)
  50. {
  51. struct ipa_desc desc = { 0 };
  52. struct ipa_mem_buffer *mem;
  53. struct ipa_hdr_init_local *cmd;
  54. u16 len;
  55. mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
  56. if (!mem) {
  57. IPAERR("failed to alloc memory object\n");
  58. goto fail_alloc_mem;
  59. }
  60. /* the immediate command param size is same for both local and system */
  61. len = sizeof(struct ipa_hdr_init_local);
  62. /*
  63. * we can use init_local ptr for init_system due to layout of the
  64. * struct
  65. */
  66. cmd = kmalloc(len, GFP_KERNEL);
  67. if (!cmd) {
  68. IPAERR("failed to alloc immediate command object\n");
  69. goto fail_alloc_cmd;
  70. }
  71. if (ipa_generate_hdr_hw_tbl(mem)) {
  72. IPAERR("fail to generate HDR HW TBL\n");
  73. goto fail_hw_tbl_gen;
  74. }
  75. if (ipa_ctx->hdr_tbl_lcl && mem->size > IPA_RAM_HDR_SIZE) {
  76. IPAERR("tbl too big, needed %d avail %d\n", mem->size,
  77. IPA_RAM_HDR_SIZE);
  78. goto fail_send_cmd;
  79. }
  80. cmd->hdr_table_addr = mem->phys_base;
  81. if (ipa_ctx->hdr_tbl_lcl) {
  82. cmd->size_hdr_table = mem->size;
  83. cmd->hdr_addr = IPA_RAM_HDR_OFST;
  84. desc.opcode = IPA_HDR_INIT_LOCAL;
  85. } else {
  86. desc.opcode = IPA_HDR_INIT_SYSTEM;
  87. }
  88. desc.pyld = cmd;
  89. desc.len = sizeof(struct ipa_hdr_init_local);
  90. desc.type = IPA_IMM_CMD_DESC;
  91. IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
  92. if (ipa_send_cmd(1, &desc)) {
  93. IPAERR("fail to send immediate command\n");
  94. goto fail_send_cmd;
  95. }
  96. if (ipa_ctx->hdr_tbl_lcl) {
  97. dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
  98. } else {
  99. if (ipa_ctx->hdr_mem.phys_base) {
  100. dma_free_coherent(NULL, ipa_ctx->hdr_mem.size,
  101. ipa_ctx->hdr_mem.base,
  102. ipa_ctx->hdr_mem.phys_base);
  103. }
  104. ipa_ctx->hdr_mem = *mem;
  105. }
  106. kfree(cmd);
  107. kfree(mem);
  108. return 0;
  109. fail_send_cmd:
  110. if (mem->base)
  111. dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
  112. fail_hw_tbl_gen:
  113. kfree(cmd);
  114. fail_alloc_cmd:
  115. kfree(mem);
  116. fail_alloc_mem:
  117. return -EPERM;
  118. }
  119. static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
  120. {
  121. struct ipa_hdr_entry *entry;
  122. struct ipa_hdr_offset_entry *offset = NULL;
  123. struct ipa_tree_node *node;
  124. u32 bin;
  125. struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
  126. if (hdr->hdr_len == 0) {
  127. IPAERR("bad parm\n");
  128. goto error;
  129. }
  130. node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
  131. if (!node) {
  132. IPAERR("failed to alloc tree node object\n");
  133. goto error;
  134. }
  135. entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
  136. if (!entry) {
  137. IPAERR("failed to alloc hdr object\n");
  138. goto hdr_alloc_fail;
  139. }
  140. INIT_LIST_HEAD(&entry->link);
  141. memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
  142. entry->hdr_len = hdr->hdr_len;
  143. strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
  144. entry->is_partial = hdr->is_partial;
  145. entry->cookie = IPA_HDR_COOKIE;
  146. if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
  147. bin = IPA_HDR_BIN0;
  148. else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
  149. bin = IPA_HDR_BIN1;
  150. else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
  151. bin = IPA_HDR_BIN2;
  152. else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
  153. bin = IPA_HDR_BIN3;
  154. else {
  155. IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
  156. goto bad_hdr_len;
  157. }
  158. if (list_empty(&htbl->head_free_offset_list[bin])) {
  159. offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
  160. GFP_KERNEL);
  161. if (!offset) {
  162. IPAERR("failed to alloc hdr offset object\n");
  163. goto ofst_alloc_fail;
  164. }
  165. INIT_LIST_HEAD(&offset->link);
  166. /*
  167. * for a first item grow, set the bin and offset which are set
  168. * in stone
  169. */
  170. offset->offset = htbl->end;
  171. offset->bin = bin;
  172. htbl->end += ipa_hdr_bin_sz[bin];
  173. list_add(&offset->link,
  174. &htbl->head_offset_list[bin]);
  175. } else {
  176. /* get the first free slot */
  177. offset =
  178. list_first_entry(&htbl->head_free_offset_list[bin],
  179. struct ipa_hdr_offset_entry, link);
  180. list_move(&offset->link, &htbl->head_offset_list[bin]);
  181. }
  182. entry->offset_entry = offset;
  183. list_add(&entry->link, &htbl->head_hdr_entry_list);
  184. htbl->hdr_cnt++;
  185. IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n", hdr->hdr_len,
  186. htbl->hdr_cnt, offset->offset);
  187. hdr->hdr_hdl = (u32) entry;
  188. node->hdl = hdr->hdr_hdl;
  189. if (ipa_insert(&ipa_ctx->hdr_hdl_tree, node)) {
  190. IPAERR("failed to add to tree\n");
  191. WARN_ON(1);
  192. goto ipa_insert_failed;
  193. }
  194. entry->ref_cnt++;
  195. return 0;
  196. ofst_alloc_fail:
  197. kmem_cache_free(ipa_ctx->hdr_offset_cache, offset);
  198. ipa_insert_failed:
  199. if (offset)
  200. list_move(&offset->link,
  201. &htbl->head_free_offset_list[offset->bin]);
  202. entry->offset_entry = NULL;
  203. htbl->hdr_cnt--;
  204. list_del(&entry->link);
  205. bad_hdr_len:
  206. entry->cookie = 0;
  207. kmem_cache_free(ipa_ctx->hdr_cache, entry);
  208. hdr_alloc_fail:
  209. kmem_cache_free(ipa_ctx->tree_node_cache, node);
  210. error:
  211. return -EPERM;
  212. }
/*
 * __ipa_del_hdr() - drop one reference to a header; fully tear it down when
 * the reference count reaches zero
 * @hdr_hdl: header handle (the entry pointer value; validated via cookie)
 * @by_user: true when the deletion was requested from user space
 *
 * NOTE(review): assumes caller holds ipa_ctx->lock (all visible callers
 * take it) — confirm for any new call site.
 *
 * Returns: 0 on success, negative errno on failure
 */
int __ipa_del_hdr(u32 hdr_hdl, bool by_user)
{
	/* the handle IS the entry pointer; cookie check below guards it */
	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
	struct ipa_tree_node *node;
	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;

	/* the handle must be registered in the handle tree */
	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
	if (node == NULL) {
		IPAERR("lookup failed\n");
		return -EINVAL;
	}

	/* reject stale or forged handles */
	if (!entry || (entry->cookie != IPA_HDR_COOKIE)) {
		IPAERR("bad parm\n");
		return -EINVAL;
	}

	IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
			htbl->hdr_cnt, entry->offset_entry->offset);

	/* a user may delete a given header only once */
	if (by_user && entry->user_deleted) {
		IPAERR("hdr already deleted by user\n");
		return -EINVAL;
	}

	if (by_user) {
		/* the default LAN RX header (offset 0) is protected from
		 * user-space deletion */
		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
			IPADBG("Trying to delete hdr %s offset=%u\n",
				entry->name, entry->offset_entry->offset);
			if (!entry->offset_entry->offset) {
				IPAERR("User cannot delete default header\n");
				return -EPERM;
			}
		}
		entry->user_deleted = true;
	}

	/* other references remain - keep the entry alive */
	if (--entry->ref_cnt) {
		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
		return 0;
	}

	/* move the offset entry to appropriate free list */
	list_move(&entry->offset_entry->link,
		&htbl->head_free_offset_list[entry->offset_entry->bin]);
	list_del(&entry->link);
	htbl->hdr_cnt--;
	entry->cookie = 0;
	kmem_cache_free(ipa_ctx->hdr_cache, entry);

	/* remove the handle from the database */
	rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
	kmem_cache_free(ipa_ctx->tree_node_cache, node);
	return 0;
}
  260. /**
  261. * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
  262. * IPA HW
  263. * @hdrs: [inout] set of headers to add
  264. *
  265. * Returns: 0 on success, negative on failure
  266. *
  267. * Note: Should not be called from atomic context
  268. */
  269. int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
  270. {
  271. int i;
  272. int result = -EFAULT;
  273. if (hdrs == NULL || hdrs->num_hdrs == 0) {
  274. IPAERR("bad parm\n");
  275. return -EINVAL;
  276. }
  277. mutex_lock(&ipa_ctx->lock);
  278. for (i = 0; i < hdrs->num_hdrs; i++) {
  279. if (__ipa_add_hdr(&hdrs->hdr[i])) {
  280. IPAERR("failed to add hdr %d\n", i);
  281. hdrs->hdr[i].status = -1;
  282. } else {
  283. hdrs->hdr[i].status = 0;
  284. }
  285. }
  286. if (hdrs->commit) {
  287. if (__ipa_commit_hdr()) {
  288. result = -EPERM;
  289. goto bail;
  290. }
  291. }
  292. result = 0;
  293. bail:
  294. mutex_unlock(&ipa_ctx->lock);
  295. return result;
  296. }
  297. EXPORT_SYMBOL(ipa_add_hdr);
  298. /**
  299. * ipa_del_hdr_by_user() - Remove the specified headers
  300. * from SW and optionally commit them to IPA HW
  301. * @hdls: [inout] set of headers to delete
  302. * @by_user: Operation requested by user?
  303. *
  304. * Returns: 0 on success, negative on failure
  305. *
  306. * Note: Should not be called from atomic context
  307. */
  308. int ipa_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
  309. {
  310. int i;
  311. int result = -EFAULT;
  312. if (hdls == NULL || hdls->num_hdls == 0) {
  313. IPAERR("bad parm\n");
  314. return -EINVAL;
  315. }
  316. mutex_lock(&ipa_ctx->lock);
  317. for (i = 0; i < hdls->num_hdls; i++) {
  318. if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) {
  319. IPAERR("failed to del hdr %i\n", i);
  320. hdls->hdl[i].status = -1;
  321. } else {
  322. hdls->hdl[i].status = 0;
  323. }
  324. }
  325. if (hdls->commit) {
  326. if (__ipa_commit_hdr()) {
  327. result = -EPERM;
  328. goto bail;
  329. }
  330. }
  331. result = 0;
  332. bail:
  333. mutex_unlock(&ipa_ctx->lock);
  334. return result;
  335. }
  336. /**
  337. * ipa_del_hdr() - Remove the specified headers from SW and optionally commit them
  338. * to IPA HW
  339. * @hdls: [inout] set of headers to delete
  340. *
  341. * Returns: 0 on success, negative on failure
  342. *
  343. * Note: Should not be called from atomic context
  344. */
  345. int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
  346. {
  347. return ipa_del_hdr_by_user(hdls, false);
  348. }
  349. EXPORT_SYMBOL(ipa_del_hdr);
  350. /**
  351. * ipa_dump_hdr() - prints all the headers in the header table in SW
  352. *
  353. * Note: Should not be called from atomic context
  354. */
  355. void ipa_dump_hdr(void)
  356. {
  357. struct ipa_hdr_entry *entry;
  358. IPADBG("START\n");
  359. mutex_lock(&ipa_ctx->lock);
  360. list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
  361. link) {
  362. IPADBG("hdr_len=%4d off=%4d bin=%4d\n", entry->hdr_len,
  363. entry->offset_entry->offset,
  364. entry->offset_entry->bin);
  365. }
  366. mutex_unlock(&ipa_ctx->lock);
  367. IPADBG("END\n");
  368. }
  369. /**
  370. * ipa_commit_hdr() - commit to IPA HW the current header table in SW
  371. *
  372. * Returns: 0 on success, negative on failure
  373. *
  374. * Note: Should not be called from atomic context
  375. */
  376. int ipa_commit_hdr(void)
  377. {
  378. int result = -EFAULT;
  379. /*
  380. * issue a commit on the routing module since routing rules point to
  381. * header table entries
  382. */
  383. if (ipa_commit_rt(IPA_IP_v4))
  384. return -EPERM;
  385. if (ipa_commit_rt(IPA_IP_v6))
  386. return -EPERM;
  387. mutex_lock(&ipa_ctx->lock);
  388. if (__ipa_commit_hdr()) {
  389. result = -EPERM;
  390. goto bail;
  391. }
  392. result = 0;
  393. bail:
  394. mutex_unlock(&ipa_ctx->lock);
  395. return result;
  396. }
  397. EXPORT_SYMBOL(ipa_commit_hdr);
  398. /**
  399. * ipa_reset_hdr() - reset the current header table in SW (does not commit to
  400. * HW)
  401. *
  402. * Returns: 0 on success, negative on failure
  403. *
  404. * Note: Should not be called from atomic context
  405. */
  406. int ipa_reset_hdr(void)
  407. {
  408. struct ipa_hdr_entry *entry;
  409. struct ipa_hdr_entry *next;
  410. struct ipa_hdr_offset_entry *off_entry;
  411. struct ipa_hdr_offset_entry *off_next;
  412. struct ipa_tree_node *node;
  413. int i;
  414. /*
  415. * issue a reset on the routing module since routing rules point to
  416. * header table entries
  417. */
  418. if (ipa_reset_rt(IPA_IP_v4))
  419. IPAERR("fail to reset v4 rt\n");
  420. if (ipa_reset_rt(IPA_IP_v6))
  421. IPAERR("fail to reset v4 rt\n");
  422. mutex_lock(&ipa_ctx->lock);
  423. IPADBG("reset hdr\n");
  424. list_for_each_entry_safe(entry, next,
  425. &ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
  426. /* do not remove the default exception header */
  427. if (!strncmp(entry->name, IPA_DFLT_HDR_NAME,
  428. IPA_RESOURCE_NAME_MAX)) {
  429. IPADBG("Trying to remove hdr %s offset=%u\n",
  430. entry->name, entry->offset_entry->offset);
  431. if (!entry->offset_entry->offset) {
  432. if (entry->is_hdr_proc_ctx) {
  433. mutex_unlock(&ipa_ctx->lock);
  434. WARN_ON(1);
  435. return -EFAULT;
  436. }
  437. IPADBG("skip default header\n");
  438. continue;
  439. }
  440. }
  441. node = ipa_search(&ipa_ctx->hdr_hdl_tree, (u32) entry);
  442. if (node == NULL) {
  443. WARN_ON(1);
  444. mutex_unlock(&ipa_ctx->lock);
  445. return -EFAULT;
  446. }
  447. list_del(&entry->link);
  448. entry->cookie = 0;
  449. kmem_cache_free(ipa_ctx->hdr_cache, entry);
  450. /* remove the handle from the database */
  451. rb_erase(&node->node, &ipa_ctx->hdr_hdl_tree);
  452. kmem_cache_free(ipa_ctx->tree_node_cache, node);
  453. }
  454. for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
  455. list_for_each_entry_safe(off_entry, off_next,
  456. &ipa_ctx->hdr_tbl.head_offset_list[i],
  457. link) {
  458. /*
  459. * do not remove the default exception header which is
  460. * at offset 0
  461. */
  462. if (off_entry->offset == 0)
  463. continue;
  464. list_del(&off_entry->link);
  465. kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
  466. }
  467. list_for_each_entry_safe(off_entry, off_next,
  468. &ipa_ctx->hdr_tbl.head_free_offset_list[i],
  469. link) {
  470. list_del(&off_entry->link);
  471. kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
  472. }
  473. }
  474. /* there is one header of size 8 */
  475. ipa_ctx->hdr_tbl.end = 8;
  476. ipa_ctx->hdr_tbl.hdr_cnt = 1;
  477. mutex_unlock(&ipa_ctx->lock);
  478. return 0;
  479. }
  480. EXPORT_SYMBOL(ipa_reset_hdr);
  481. static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
  482. {
  483. struct ipa_hdr_entry *entry;
  484. list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
  485. link) {
  486. if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
  487. return entry;
  488. }
  489. return NULL;
  490. }
  491. /**
  492. * ipa_get_hdr() - Lookup the specified header resource
  493. * @lookup: [inout] header to lookup and its handle
  494. *
  495. * lookup the specified header resource and return handle if it exists
  496. *
  497. * Returns: 0 on success, negative on failure
  498. *
  499. * Note: Should not be called from atomic context
  500. * Caller should call ipa_put_hdr later if this function succeeds
  501. */
  502. int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
  503. {
  504. struct ipa_hdr_entry *entry;
  505. int result = -1;
  506. if (lookup == NULL) {
  507. IPAERR("bad parm\n");
  508. return -EINVAL;
  509. }
  510. mutex_lock(&ipa_ctx->lock);
  511. entry = __ipa_find_hdr(lookup->name);
  512. if (entry) {
  513. lookup->hdl = (uint32_t) entry;
  514. result = 0;
  515. }
  516. mutex_unlock(&ipa_ctx->lock);
  517. return result;
  518. }
  519. EXPORT_SYMBOL(ipa_get_hdr);
  520. /**
  521. * __ipa_release_hdr() - drop reference to header and cause
  522. * deletion if reference count permits
  523. * @hdr_hdl: [in] handle of header to be released
  524. *
  525. * Returns: 0 on success, negative on failure
  526. */
  527. int __ipa_release_hdr(u32 hdr_hdl)
  528. {
  529. int result = 0;
  530. if (__ipa_del_hdr(hdr_hdl, false)) {
  531. IPADBG("fail to del hdr %x\n", hdr_hdl);
  532. result = -EFAULT;
  533. goto bail;
  534. }
  535. /* commit for put */
  536. if (__ipa_commit_hdr()) {
  537. IPAERR("fail to commit hdr\n");
  538. result = -EFAULT;
  539. goto bail;
  540. }
  541. bail:
  542. return result;
  543. }
/**
 * ipa_put_hdr() - Release the specified header handle
 * @hdr_hdl: [in] the header handle to release
 *
 * NOTE(review): as written this only validates the handle (tree lookup +
 * cookie check) and returns — it does not decrement the reference taken by
 * ipa_get_hdr(); __ipa_release_hdr() is where the actual ref drop happens.
 * Confirm this is the intended contract.
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_put_hdr(u32 hdr_hdl)
{
	/* the handle is the entry pointer; cookie check below validates it */
	struct ipa_hdr_entry *entry = (struct ipa_hdr_entry *)hdr_hdl;
	struct ipa_tree_node *node;
	int result = -EFAULT;

	mutex_lock(&ipa_ctx->lock);

	/* the handle must be registered in the handle tree */
	node = ipa_search(&ipa_ctx->hdr_hdl_tree, hdr_hdl);
	if (node == NULL) {
		IPAERR("lookup failed\n");
		result = -EINVAL;
		goto bail;
	}

	/* reject stale or forged handles */
	if (entry == NULL || entry->cookie != IPA_HDR_COOKIE) {
		IPAERR("bad params\n");
		result = -EINVAL;
		goto bail;
	}
	result = 0;
bail:
	mutex_unlock(&ipa_ctx->lock);
	return result;
}
EXPORT_SYMBOL(ipa_put_hdr);
  575. /**
  576. * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
  577. * @copy: [inout] header to lookup and its copy
  578. *
  579. * lookup the specified header resource and return a copy of it (along with its
  580. * attributes) if it exists, this would be called for partial headers
  581. *
  582. * Returns: 0 on success, negative on failure
  583. *
  584. * Note: Should not be called from atomic context
  585. */
  586. int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
  587. {
  588. struct ipa_hdr_entry *entry;
  589. int result = -EFAULT;
  590. if (copy == NULL) {
  591. IPAERR("bad parm\n");
  592. return -EINVAL;
  593. }
  594. mutex_lock(&ipa_ctx->lock);
  595. entry = __ipa_find_hdr(copy->name);
  596. if (entry) {
  597. memcpy(copy->hdr, entry->hdr, entry->hdr_len);
  598. copy->hdr_len = entry->hdr_len;
  599. copy->is_partial = entry->is_partial;
  600. result = 0;
  601. }
  602. mutex_unlock(&ipa_ctx->lock);
  603. return result;
  604. }
  605. EXPORT_SYMBOL(ipa_copy_hdr);