/* ipa_rt.c */
  1. /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/bitops.h>
  13. #include "ipa_i.h"
  14. #define IPA_RT_TABLE_INDEX_NOT_FOUND (-1)
  15. #define IPA_RT_TABLE_WORD_SIZE (4)
  16. #define IPA_RT_INDEX_BITMAP_SIZE (32)
  17. #define IPA_RT_TABLE_MEMORY_ALLIGNMENT (127)
  18. #define IPA_RT_ENTRY_MEMORY_ALLIGNMENT (3)
  19. #define IPA_RT_BIT_MASK (0x1)
  20. #define IPA_RT_STATUS_OF_ADD_FAILED (-1)
  21. #define IPA_RT_STATUS_OF_DEL_FAILED (-1)
/**
 * ipa_generate_rt_hw_rule() - generates the routing hardware rule
 * @ip: the ip address family type
 * @entry: routing entry
 * @buf: output buffer, buf == NULL means
 * caller wants to know the size of the rule as seen
 * by HW so they did not pass a valid buffer, we will use a
 * scratch buffer instead.
 * With this scheme we are going to
 * generate the rule twice, once to know size using scratch
 * buffer and second to write the rule to the actual caller
 * supplied buffer which is of required size
 *
 * Returns: 0 on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 *
 */
static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
		struct ipa_rt_entry *entry, u8 *buf)
{
	struct ipa_rt_rule_hw_hdr *rule_hdr;
	const struct ipa_rt_rule *rule =
		(const struct ipa_rt_rule *)&entry->rule;
	u16 en_rule = 0;
	/* scratch area used for the sizing pass (buf == NULL) */
	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
	u8 *start;
	int pipe_idx;

	if (buf == NULL) {
		memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
		buf = (u8 *)tmp;
	}
	start = buf;

	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
	/* map the logical destination client to a HW pipe index */
	pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode,
			entry->rule.dst);
	if (pipe_idx == -1) {
		IPAERR("Wrong destination pipe specified in RT rule\n");
		WARN_ON(1);
		return -EPERM;
	}
	rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
	/* header table may be system (DDR) resident rather than local RAM */
	rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
	if (entry->hdr) {
		/* HW expects the header offset in 4-byte words */
		rule_hdr->u.hdr.hdr_offset =
			entry->hdr->offset_entry->offset >> 2;
	} else {
		rule_hdr->u.hdr.hdr_offset = 0;
	}
	buf += sizeof(struct ipa_rt_rule_hw_hdr);

	/* append the attribute-match part; en_rule receives enable bits */
	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
		IPAERR("fail to generate hw rule\n");
		return -EPERM;
	}

	IPADBG("en_rule 0x%x\n", en_rule);

	rule_hdr->u.hdr.en_rule = en_rule;
	ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);

	/*
	 * first (sizing) pass records hw_len; the second pass must produce
	 * exactly the same length or the precomputed table layout is invalid
	 */
	if (entry->hw_len == 0) {
		entry->hw_len = buf - start;
	} else if (entry->hw_len != (buf - start)) {
		IPAERR(
		"hw_len differs b/w passes passed=0x%x calc=0x%x\n",
		entry->hw_len,
		(buf - start));
		return -EPERM;
	}

	return 0;
}
/**
 * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
 * @ip: the ip address family type
 * @hdr_sz: header size
 * @max_rt_idx: maximal index
 *
 * Returns: size on success, negative on failure
 *
 * caller needs to hold any needed locks to ensure integrity
 *
 * the MSB set in rt_idx_bitmap indicates the size of hdr of routing tbl
 *
 * Side effects: runs the sizing pass of ipa_generate_rt_hw_rule() for every
 * rule (fills entry->hw_len) and updates tbl->sz for each non-empty table.
 */
static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
		int *max_rt_idx)
{
	struct ipa_rt_tbl_set *set;
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	u32 total_sz = 0;
	u32 tbl_sz;
	u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
	int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
	int i;

	*hdr_sz = 0;
	set = &ipa_ctx->rt_tbl_set[ip];

	/* find the highest table index currently in use */
	for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
		if (bitmap & IPA_RT_BIT_MASK)
			highest_bit_set = i;
		bitmap >>= 1;
	}

	*max_rt_idx = highest_bit_set;
	if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
		IPAERR("no rt tbls present\n");
		/* even an empty table set needs one header word */
		total_sz = IPA_RT_TABLE_WORD_SIZE;
		*hdr_sz = IPA_RT_TABLE_WORD_SIZE;
		return total_sz;
	}

	/* one header word per index, up to and including the highest */
	*hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
	total_sz += *hdr_sz;
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		tbl_sz = 0;
		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
			/* NULL buf: sizing pass only, fills entry->hw_len */
			if (ipa_generate_rt_hw_rule(ip, entry, NULL)) {
				IPAERR("failed to find HW RT rule size\n");
				return -EPERM;
			}
			tbl_sz += entry->hw_len;
		}

		if (tbl_sz)
			tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;

		/* system (DDR) tables get their own DMA buffers later */
		if (tbl->in_sys)
			continue;

		if (tbl_sz) {
			/* add the terminator */
			total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
			/* every rule-set should start at word boundary */
			total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
				~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
		}
	}

	IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);

	return total_sz;
}
/**
 * ipa_generate_rt_hw_tbl() - generates the routing hardware table
 * @ip: [in] the ip address family type
 * @mem: [out] buffer to put the filtering table
 *
 * Returns: 0 on success, negative on failure
 *
 * Layout: a header of one 32-bit word per table index, followed by the
 * rule-sets of local (non-system) tables. System tables are rendered into
 * their own DMA buffers and the header word holds their physical address.
 * NOTE(review): (u32) casts on pointers assume a 32-bit kernel — confirm.
 */
int ipa_generate_rt_hw_tbl(enum ipa_ip_type ip, struct ipa_mem_buffer *mem)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_rt_tbl_set *set;
	u32 hdr_sz;
	u32 offset;
	u8 *hdr;
	u8 *body;
	u8 *base;
	struct ipa_mem_buffer rt_tbl_mem;
	u8 *rt_tbl_mem_body;
	int max_rt_idx;
	int i;
	int res;

	/* sizing pass: also fills entry->hw_len and tbl->sz */
	res = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
	if (res < 0) {
		IPAERR("ipa_get_rt_hw_tbl_size failed %d\n", res);
		goto error;
	}

	mem->size = res;
	/* round total size up to HW table alignment */
	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
			~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
	if (mem->size == 0) {
		IPAERR("rt tbl empty ip=%d\n", ip);
		goto error;
	}
	mem->base = dma_alloc_coherent(NULL, mem->size, &mem->phys_base,
			GFP_KERNEL);
	if (!mem->base) {
		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
		goto error;
	}
	memset(mem->base, 0, mem->size);

	/* build the rt tbl in the DMA buffer to submit to IPA HW */
	base = hdr = (u8 *)mem->base;
	body = base + hdr_sz;

	/* setup all indices to point to the empty sys rt tbl */
	for (i = 0; i <= max_rt_idx; i++)
		ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
				hdr + (i * IPA_RT_TABLE_WORD_SIZE));

	set = &ipa_ctx->rt_tbl_set[ip];
	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
		offset = body - base;
		if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
			IPAERR("offset is not word multiple %d\n", offset);
			goto proc_err;
		}

		if (!tbl->in_sys) {
			/* convert offset to words from bytes */
			offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
			/* rule is at an offset from base */
			offset |= IPA_RT_BIT_MASK;
			/* update the hdr at the right index */
			ipa_write_32(offset, hdr +
					(tbl->idx * IPA_RT_TABLE_WORD_SIZE));
			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				if (ipa_generate_rt_hw_rule(ip, entry, body)) {
					IPAERR("failed to gen HW RT rule\n");
					goto proc_err;
				}
				body += entry->hw_len;
			}

			/* write the rule-set terminator */
			body = ipa_write_32(0, body);
			if ((u32)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
				/* advance body to next word boundary */
				body = body + (IPA_RT_TABLE_WORD_SIZE -
						((u32)body &
						IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
		} else {
			if (tbl->sz == 0) {
				IPAERR("cannot generate 0 size table\n");
				goto proc_err;
			}

			/* allocate memory for the RT tbl */
			rt_tbl_mem.size = tbl->sz;
			rt_tbl_mem.base =
			   dma_alloc_coherent(NULL, rt_tbl_mem.size,
					   &rt_tbl_mem.phys_base, GFP_KERNEL);
			if (!rt_tbl_mem.base) {
				IPAERR("fail to alloc DMA buff of size %d\n",
						rt_tbl_mem.size);
				WARN_ON(1);
				goto proc_err;
			}

			/* HW requires word-aligned table start addresses */
			WARN_ON(rt_tbl_mem.phys_base &
					IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
			rt_tbl_mem_body = rt_tbl_mem.base;
			memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
			/* update the hdr at the right index */
			ipa_write_32(rt_tbl_mem.phys_base,
					hdr + (tbl->idx *
					IPA_RT_TABLE_WORD_SIZE));
			/* generate the rule-set */
			list_for_each_entry(entry, &tbl->head_rt_rule_list,
					link) {
				if (ipa_generate_rt_hw_rule(ip, entry,
							rt_tbl_mem_body)) {
					IPAERR("failed to gen HW RT rule\n");
					WARN_ON(1);
					goto rt_table_mem_alloc_failed;
				}
				rt_tbl_mem_body += entry->hw_len;
			}

			/* write the rule-set terminator */
			rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);

			/*
			 * keep the previous buffer alive until commit so HW
			 * never reads freed memory; reaped after commit
			 */
			if (tbl->curr_mem.phys_base) {
				WARN_ON(tbl->prev_mem.phys_base);
				tbl->prev_mem = tbl->curr_mem;
			}
			tbl->curr_mem = rt_tbl_mem;
		}
	}

	return 0;

rt_table_mem_alloc_failed:
	dma_free_coherent(NULL, rt_tbl_mem.size,
			rt_tbl_mem.base, rt_tbl_mem.phys_base);
proc_err:
	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
	mem->base = NULL;
error:
	return -EPERM;
}
  286. static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
  287. {
  288. struct ipa_rt_tbl *tbl;
  289. struct ipa_rt_tbl *next;
  290. struct ipa_rt_tbl_set *set;
  291. set = &ipa_ctx->rt_tbl_set[ip];
  292. list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
  293. if (tbl->prev_mem.phys_base) {
  294. IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
  295. dma_free_coherent(NULL, tbl->prev_mem.size,
  296. tbl->prev_mem.base,
  297. tbl->prev_mem.phys_base);
  298. memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
  299. }
  300. }
  301. set = &ipa_ctx->reap_rt_tbl_set[ip];
  302. list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
  303. list_del(&tbl->link);
  304. WARN_ON(tbl->prev_mem.phys_base != 0);
  305. if (tbl->curr_mem.phys_base) {
  306. IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
  307. ip);
  308. dma_free_coherent(NULL, tbl->curr_mem.size,
  309. tbl->curr_mem.base,
  310. tbl->curr_mem.phys_base);
  311. kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
  312. }
  313. }
  314. }
/**
 * __ipa_commit_rt() - commit the SW routing table of the given IP family
 * to IPA HW via an immediate command, then reap stale table buffers
 * @ip: the ip address family type
 *
 * Returns: 0 on success, -EPERM on failure
 *
 * caller must hold ipa_ctx->lock
 */
static int __ipa_commit_rt(enum ipa_ip_type ip)
{
	struct ipa_desc desc = { 0 };
	struct ipa_mem_buffer *mem;
	void *cmd;
	struct ipa_ip_v4_routing_init *v4;
	struct ipa_ip_v6_routing_init *v6;
	u16 avail;
	u16 size;

	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
	if (!mem) {
		IPAERR("failed to alloc memory object\n");
		goto fail_alloc_mem;
	}

	/* pick the size budget and command layout for this IP family */
	if (ip == IPA_IP_v4) {
		avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_RAM_V4_RT_SIZE :
			IPA_RAM_V4_RT_SIZE_DDR;
		size = sizeof(struct ipa_ip_v4_routing_init);
	} else {
		avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_RAM_V6_RT_SIZE :
			IPA_RAM_V6_RT_SIZE_DDR;
		size = sizeof(struct ipa_ip_v6_routing_init);
	}
	cmd = kmalloc(size, GFP_KERNEL);
	if (!cmd) {
		IPAERR("failed to alloc immediate command object\n");
		goto fail_alloc_cmd;
	}

	/* render the whole SW table set into a fresh DMA buffer */
	if (ipa_generate_rt_hw_tbl(ip, mem)) {
		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
		goto fail_hw_tbl_gen;
	}

	if (mem->size > avail) {
		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
		goto fail_send_cmd;
	}

	/* fill the routing-init immediate command */
	if (ip == IPA_IP_v4) {
		v4 = (struct ipa_ip_v4_routing_init *)cmd;
		desc.opcode = IPA_IP_V4_ROUTING_INIT;
		v4->ipv4_rules_addr = mem->phys_base;
		v4->size_ipv4_rules = mem->size;
		v4->ipv4_addr = IPA_RAM_V4_RT_OFST;
	} else {
		v6 = (struct ipa_ip_v6_routing_init *)cmd;
		desc.opcode = IPA_IP_V6_ROUTING_INIT;
		v6->ipv6_rules_addr = mem->phys_base;
		v6->size_ipv6_rules = mem->size;
		v6->ipv6_addr = IPA_RAM_V6_RT_OFST;
	}

	desc.pyld = cmd;
	desc.len = size;
	desc.type = IPA_IMM_CMD_DESC;
	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);

	if (ipa_send_cmd(1, &desc)) {
		IPAERR("fail to send immediate command\n");
		goto fail_send_cmd;
	}

	/* HW now uses the new tables; old buffers can be released */
	__ipa_reap_sys_rt_tbls(ip);
	dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
	kfree(cmd);
	kfree(mem);

	return 0;

fail_send_cmd:
	if (mem->base)
		dma_free_coherent(NULL, mem->size, mem->base, mem->phys_base);
fail_hw_tbl_gen:
	kfree(cmd);
fail_alloc_cmd:
	kfree(mem);
fail_alloc_mem:
	return -EPERM;
}
  387. /**
  388. * __ipa_find_rt_tbl() - find the routing table
  389. * which name is given as parameter
  390. * @ip: [in] the ip address family type of the wanted routing table
  391. * @name: [in] the name of the wanted routing table
  392. *
  393. * Returns: the routing table which name is given as parameter, or NULL if it
  394. * doesn't exist
  395. */
  396. struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
  397. {
  398. struct ipa_rt_tbl *entry;
  399. struct ipa_rt_tbl_set *set;
  400. set = &ipa_ctx->rt_tbl_set[ip];
  401. list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
  402. if (!strncmp(name, entry->name, IPA_RESOURCE_NAME_MAX))
  403. return entry;
  404. }
  405. return NULL;
  406. }
  407. static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
  408. const char *name)
  409. {
  410. struct ipa_rt_tbl *entry;
  411. struct ipa_rt_tbl_set *set;
  412. struct ipa_tree_node *node;
  413. int i;
  414. node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
  415. if (!node) {
  416. IPAERR("failed to alloc tree node object\n");
  417. goto node_alloc_fail;
  418. }
  419. if (ip >= IPA_IP_MAX || name == NULL) {
  420. IPAERR("bad parm\n");
  421. goto error;
  422. }
  423. set = &ipa_ctx->rt_tbl_set[ip];
  424. /* check if this table exists */
  425. entry = __ipa_find_rt_tbl(ip, name);
  426. if (!entry) {
  427. entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
  428. if (!entry) {
  429. IPAERR("failed to alloc RT tbl object\n");
  430. goto error;
  431. }
  432. /* find a routing tbl index */
  433. for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
  434. if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
  435. entry->idx = i;
  436. set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
  437. break;
  438. }
  439. }
  440. if (i == IPA_RT_INDEX_BITMAP_SIZE) {
  441. IPAERR("not free RT tbl indices left\n");
  442. goto fail_rt_idx_alloc;
  443. }
  444. INIT_LIST_HEAD(&entry->head_rt_rule_list);
  445. INIT_LIST_HEAD(&entry->link);
  446. strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
  447. entry->set = set;
  448. entry->cookie = IPA_RT_TBL_COOKIE;
  449. entry->in_sys = (ip == IPA_IP_v4) ?
  450. !ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
  451. set->tbl_cnt++;
  452. list_add(&entry->link, &set->head_rt_tbl_list);
  453. IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
  454. set->tbl_cnt, ip);
  455. node->hdl = (u32)entry;
  456. if (ipa_insert(&ipa_ctx->rt_tbl_hdl_tree, node)) {
  457. IPAERR("failed to add to tree\n");
  458. WARN_ON(1);
  459. goto ipa_insert_failed;
  460. }
  461. } else {
  462. kmem_cache_free(ipa_ctx->tree_node_cache, node);
  463. }
  464. return entry;
  465. ipa_insert_failed:
  466. set->tbl_cnt--;
  467. list_del(&entry->link);
  468. fail_rt_idx_alloc:
  469. entry->cookie = 0;
  470. kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
  471. error:
  472. kmem_cache_free(ipa_ctx->tree_node_cache, node);
  473. node_alloc_fail:
  474. return NULL;
  475. }
/**
 * __ipa_del_rt_tbl() - delete a routing table object
 * @entry: the table to delete
 *
 * Local tables are freed immediately; system (DDR-resident) tables are
 * moved to the reap list so their DMA buffers are released only after the
 * next HW commit. In both cases the table index and handle are released.
 *
 * Returns: 0 on success, negative on failure
 *
 * caller must hold ipa_ctx->lock
 */
static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
{
	struct ipa_tree_node *node;
	enum ipa_ip_type ip = IPA_IP_MAX;

	if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
		IPAERR("bad parms\n");
		return -EINVAL;
	}
	node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)entry);
	if (node == NULL) {
		IPAERR("lookup failed\n");
		return -EPERM;
	}

	/* derive the IP family from which set the table belongs to */
	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
		ip = IPA_IP_v4;
	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
		ip = IPA_IP_v6;
	else {
		WARN_ON(1);
		return -EPERM;
	}

	if (!entry->in_sys) {
		list_del(&entry->link);
		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
		entry->set->tbl_cnt--;
		IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
				entry->set->tbl_cnt);
		kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
	} else {
		/* defer freeing: HW may still reference its DMA buffer */
		list_move(&entry->link,
				&ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
		entry->set->tbl_cnt--;
		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
				entry->set->tbl_cnt);
	}

	/* remove the handle from the database */
	rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
	kmem_cache_free(ipa_ctx->tree_node_cache, node);

	return 0;
}
/**
 * __ipa_add_rt_rule() - add a single routing rule to the named table
 * @ip: the ip address family type
 * @name: routing table name (table is created if absent)
 * @rule: the rule to add
 * @at_rear: add at tail (non-zero) or head (zero) of the table
 * @rule_hdl: [out] handle of the new rule (the entry pointer as u32)
 *
 * Returns: 0 on success, -EPERM on failure
 *
 * caller must hold ipa_ctx->lock
 */
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
{
	struct ipa_rt_tbl *tbl;
	struct ipa_rt_entry *entry;
	struct ipa_tree_node *node;

	/* a referenced header handle must exist and carry a valid cookie */
	if (rule->hdr_hdl &&
	    ((ipa_search(&ipa_ctx->hdr_hdl_tree, rule->hdr_hdl) == NULL) ||
	     ((struct ipa_hdr_entry *)rule->hdr_hdl)->cookie != IPA_HDR_COOKIE)) {
		IPAERR("rt rule does not point to valid hdr\n");
		goto error;
	}

	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
	if (!node) {
		IPAERR("failed to alloc tree node object\n");
		goto error;
	}

	tbl = __ipa_add_rt_tbl(ip, name);
	if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
		IPAERR("bad params\n");
		goto fail_rt_tbl_sanity;
	}
	/*
	 * do not allow any rule to be added at "default" routing
	 * table
	 */
	if (!strncmp(tbl->name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX) &&
	    (tbl->rule_cnt > 0)) {
		IPAERR_RL("cannot add rules to default rt table\n");
		goto fail_rt_tbl_sanity;
	}

	entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
	if (!entry) {
		IPAERR("failed to alloc RT rule object\n");
		goto fail_rt_tbl_sanity;
	}
	INIT_LIST_HEAD(&entry->link);
	entry->cookie = IPA_RT_RULE_COOKIE;
	entry->rule = *rule;
	entry->tbl = tbl;
	entry->hdr = (struct ipa_hdr_entry *)rule->hdr_hdl;
	if (at_rear)
		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
	else
		list_add(&entry->link, &tbl->head_rt_rule_list);
	tbl->rule_cnt++;
	/* the rule holds a reference on its header */
	if (entry->hdr)
		entry->hdr->ref_cnt++;
	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
	*rule_hdl = (u32)entry;

	node->hdl = *rule_hdl;
	if (ipa_insert(&ipa_ctx->rt_rule_hdl_tree, node)) {
		IPAERR("failed to add to tree\n");
		WARN_ON(1);
		goto ipa_insert_failed;
	}

	return 0;

ipa_insert_failed:
	list_del(&entry->link);
	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
fail_rt_tbl_sanity:
	kmem_cache_free(ipa_ctx->tree_node_cache, node);
error:
	return -EPERM;
}
  582. /**
  583. * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
  584. * commit to IPA HW
  585. * @rules: [inout] set of routing rules to add
  586. *
  587. * Returns: 0 on success, negative on failure
  588. *
  589. * Note: Should not be called from atomic context
  590. */
  591. int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
  592. {
  593. int i;
  594. int ret;
  595. if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
  596. IPAERR("bad parm\n");
  597. return -EINVAL;
  598. }
  599. mutex_lock(&ipa_ctx->lock);
  600. for (i = 0; i < rules->num_rules; i++) {
  601. if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
  602. &rules->rules[i].rule,
  603. rules->rules[i].at_rear,
  604. &rules->rules[i].rt_rule_hdl)) {
  605. IPAERR("failed to add rt rule %d\n", i);
  606. rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
  607. } else {
  608. rules->rules[i].status = 0;
  609. }
  610. }
  611. if (rules->commit)
  612. if (__ipa_commit_rt(rules->ip)) {
  613. ret = -EPERM;
  614. goto bail;
  615. }
  616. ret = 0;
  617. bail:
  618. mutex_unlock(&ipa_ctx->lock);
  619. return ret;
  620. }
  621. EXPORT_SYMBOL(ipa_add_rt_rule);
/**
 * __ipa_del_rt_rule() - delete a routing rule by its handle
 * @rule_hdl: rule handle (the ipa_rt_entry pointer cast to u32)
 *
 * Releases the rule's header reference and, when the owning table becomes
 * empty and unreferenced, deletes the table as well.
 *
 * Returns: 0 on success, -EINVAL on failure
 *
 * caller must hold ipa_ctx->lock
 */
int __ipa_del_rt_rule(u32 rule_hdl)
{
	struct ipa_rt_entry *entry = (struct ipa_rt_entry *)rule_hdl;
	struct ipa_tree_node *node;

	/* handle must exist in the rule-handle database */
	node = ipa_search(&ipa_ctx->rt_rule_hdl_tree, rule_hdl);
	if (node == NULL) {
		IPAERR("lookup failed\n");
		return -EINVAL;
	}

	if (entry == NULL || (entry->cookie != IPA_RT_RULE_COOKIE)) {
		IPAERR("bad params\n");
		return -EINVAL;
	}

	/* drop the reference taken on the header at add time */
	if (entry->hdr)
		__ipa_release_hdr((u32)entry->hdr);
	list_del(&entry->link);
	entry->tbl->rule_cnt--;
	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
			entry->tbl->rule_cnt);
	/* last rule gone and nobody holds the table: delete it too */
	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
		if (__ipa_del_rt_tbl(entry->tbl))
			IPAERR("fail to del RT tbl\n");
	}
	entry->cookie = 0;
	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);

	/* remove the handle from the database */
	rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
	kmem_cache_free(ipa_ctx->tree_node_cache, node);

	return 0;
}
  652. /**
  653. * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally
  654. * commit to IPA HW
  655. * @hdls: [inout] set of routing rules to delete
  656. *
  657. * Returns: 0 on success, negative on failure
  658. *
  659. * Note: Should not be called from atomic context
  660. */
  661. int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
  662. {
  663. int i;
  664. int ret;
  665. if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
  666. IPAERR("bad parm\n");
  667. return -EINVAL;
  668. }
  669. mutex_lock(&ipa_ctx->lock);
  670. for (i = 0; i < hdls->num_hdls; i++) {
  671. if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
  672. IPAERR("failed to del rt rule %i\n", i);
  673. hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
  674. } else {
  675. hdls->hdl[i].status = 0;
  676. }
  677. }
  678. if (hdls->commit)
  679. if (__ipa_commit_rt(hdls->ip)) {
  680. ret = -EPERM;
  681. goto bail;
  682. }
  683. ret = 0;
  684. bail:
  685. mutex_unlock(&ipa_ctx->lock);
  686. return ret;
  687. }
  688. EXPORT_SYMBOL(ipa_del_rt_rule);
  689. /**
  690. * ipa_commit_rt_rule() - Commit the current SW routing table of specified type
  691. * to IPA HW
  692. * @ip: The family of routing tables
  693. *
  694. * Returns: 0 on success, negative on failure
  695. *
  696. * Note: Should not be called from atomic context
  697. */
  698. int ipa_commit_rt(enum ipa_ip_type ip)
  699. {
  700. int ret;
  701. if (ip >= IPA_IP_MAX) {
  702. IPAERR("bad parm\n");
  703. return -EINVAL;
  704. }
  705. /*
  706. * issue a commit on the filtering module of same IP type since
  707. * filtering rules point to routing tables
  708. */
  709. if (ipa_commit_flt(ip))
  710. return -EPERM;
  711. mutex_lock(&ipa_ctx->lock);
  712. if (__ipa_commit_rt(ip)) {
  713. ret = -EPERM;
  714. goto bail;
  715. }
  716. ret = 0;
  717. bail:
  718. mutex_unlock(&ipa_ctx->lock);
  719. return ret;
  720. }
  721. EXPORT_SYMBOL(ipa_commit_rt);
  722. /**
  723. * ipa_reset_rt() - reset the current SW routing table of specified type
  724. * (does not commit to HW)
  725. * @ip: The family of routing tables
  726. *
  727. * Returns: 0 on success, negative on failure
  728. *
  729. * Note: Should not be called from atomic context
  730. */
  731. int ipa_reset_rt(enum ipa_ip_type ip)
  732. {
  733. struct ipa_rt_tbl *tbl;
  734. struct ipa_rt_tbl *tbl_next;
  735. struct ipa_rt_tbl_set *set;
  736. struct ipa_rt_entry *rule;
  737. struct ipa_rt_entry *rule_next;
  738. struct ipa_tree_node *node;
  739. struct ipa_rt_tbl_set *rset;
  740. struct ipa_hdr_entry *hdr_entry;
  741. struct ipa_hdr_proc_ctx_entry *hdr_proc_entry;
  742. if (ip >= IPA_IP_MAX) {
  743. IPAERR("bad parm\n");
  744. return -EINVAL;
  745. }
  746. /*
  747. * issue a reset on the filtering module of same IP type since
  748. * filtering rules point to routing tables
  749. */
  750. if (ipa_reset_flt(ip))
  751. IPAERR("fail to reset flt ip=%d\n", ip);
  752. set = &ipa_ctx->rt_tbl_set[ip];
  753. rset = &ipa_ctx->reap_rt_tbl_set[ip];
  754. mutex_lock(&ipa_ctx->lock);
  755. IPADBG("reset rt ip=%d\n", ip);
  756. list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
  757. list_for_each_entry_safe(rule, rule_next,
  758. &tbl->head_rt_rule_list, link) {
  759. node = ipa_search(&ipa_ctx->rt_rule_hdl_tree,
  760. (u32)rule);
  761. if (node == NULL) {
  762. WARN_ON(1);
  763. mutex_unlock(&ipa_ctx->lock);
  764. return -EFAULT;
  765. }
  766. /*
  767. * for the "default" routing tbl, remove all but the
  768. * last rule
  769. */
  770. if (tbl->idx == 0 && tbl->rule_cnt == 1)
  771. continue;
  772. list_del(&rule->link);
  773. if (rule->hdr) {
  774. hdr_entry = ipa_id_find(
  775. rule->rule.hdr_hdl);
  776. if (!hdr_entry ||
  777. hdr_entry->cookie != IPA_HDR_COOKIE) {
  778. IPAERR_RL(
  779. "Header already deleted\n");
  780. return -EINVAL;
  781. }
  782. } else if (rule->proc_ctx) {
  783. hdr_proc_entry =
  784. ipa_id_find(
  785. rule->rule.hdr_proc_ctx_hdl);
  786. if (!hdr_proc_entry ||
  787. hdr_proc_entry->cookie !=
  788. IPA_PROC_HDR_COOKIE) {
  789. IPAERR_RL(
  790. "Proc entry already deleted\n");
  791. return -EINVAL;
  792. }
  793. }
  794. tbl->rule_cnt--;
  795. if (rule->hdr)
  796. __ipa_release_hdr((u32)rule->hdr);
  797. rule->cookie = 0;
  798. kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
  799. /* remove the handle from the database */
  800. rb_erase(&node->node, &ipa_ctx->rt_rule_hdl_tree);
  801. kmem_cache_free(ipa_ctx->tree_node_cache, node);
  802. }
  803. node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, (u32)tbl);
  804. if (node == NULL) {
  805. WARN_ON(1);
  806. mutex_unlock(&ipa_ctx->lock);
  807. return -EFAULT;
  808. }
  809. /* do not remove the "default" routing tbl which has index 0 */
  810. if (tbl->idx != 0) {
  811. if (!tbl->in_sys) {
  812. list_del(&tbl->link);
  813. set->tbl_cnt--;
  814. clear_bit(tbl->idx,
  815. &ipa_ctx->rt_idx_bitmap[ip]);
  816. IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
  817. tbl->idx, set->tbl_cnt);
  818. kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
  819. } else {
  820. list_move(&tbl->link, &rset->head_rt_tbl_list);
  821. clear_bit(tbl->idx,
  822. &ipa_ctx->rt_idx_bitmap[ip]);
  823. set->tbl_cnt--;
  824. IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
  825. tbl->idx, set->tbl_cnt);
  826. }
  827. /* remove the handle from the database */
  828. rb_erase(&node->node, &ipa_ctx->rt_tbl_hdl_tree);
  829. kmem_cache_free(ipa_ctx->tree_node_cache, node);
  830. }
  831. }
  832. mutex_unlock(&ipa_ctx->lock);
  833. return 0;
  834. }
  835. EXPORT_SYMBOL(ipa_reset_rt);
  836. /**
  837. * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it
  838. * exists, if lookup succeeds the routing table ref cnt is increased
  839. * @lookup: [inout] routing table to lookup and its handle
  840. *
  841. * Returns: 0 on success, negative on failure
  842. *
  843. * Note: Should not be called from atomic context
  844. * Caller should call ipa_put_rt_tbl later if this function succeeds
  845. */
  846. int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
  847. {
  848. struct ipa_rt_tbl *entry;
  849. int result = -EFAULT;
  850. if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
  851. IPAERR("bad parm\n");
  852. return -EINVAL;
  853. }
  854. mutex_lock(&ipa_ctx->lock);
  855. entry = __ipa_add_rt_tbl(lookup->ip, lookup->name);
  856. if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
  857. if (entry->ref_cnt == ((u32)~0U)) {
  858. IPAERR("fail: ref count crossed limit\n");
  859. goto ret;
  860. }
  861. entry->ref_cnt++;
  862. lookup->hdl = (uint32_t)entry;
  863. /* commit for get */
  864. if (__ipa_commit_rt(lookup->ip))
  865. IPAERR("fail to commit RT tbl\n");
  866. result = 0;
  867. }
  868. ret:
  869. mutex_unlock(&ipa_ctx->lock);
  870. return result;
  871. }
  872. EXPORT_SYMBOL(ipa_get_rt_tbl);
  873. /**
  874. * ipa_put_rt_tbl() - Release the specified routing table handle
  875. * @rt_tbl_hdl: [in] the routing table handle to release
  876. *
  877. * Returns: 0 on success, negative on failure
  878. *
  879. * Note: Should not be called from atomic context
  880. */
  881. int ipa_put_rt_tbl(u32 rt_tbl_hdl)
  882. {
  883. struct ipa_rt_tbl *entry = (struct ipa_rt_tbl *)rt_tbl_hdl;
  884. struct ipa_tree_node *node;
  885. enum ipa_ip_type ip = IPA_IP_MAX;
  886. int result;
  887. mutex_lock(&ipa_ctx->lock);
  888. node = ipa_search(&ipa_ctx->rt_tbl_hdl_tree, rt_tbl_hdl);
  889. if (node == NULL) {
  890. IPAERR("lookup failed\n");
  891. result = -EINVAL;
  892. goto ret;
  893. }
  894. if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE) ||
  895. entry->ref_cnt == 0) {
  896. IPAERR("bad parms\n");
  897. result = -EINVAL;
  898. goto ret;
  899. }
  900. if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
  901. ip = IPA_IP_v4;
  902. else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
  903. ip = IPA_IP_v6;
  904. else {
  905. WARN_ON(1);
  906. result = -EINVAL;
  907. goto ret;
  908. }
  909. entry->ref_cnt--;
  910. if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
  911. if (__ipa_del_rt_tbl(entry))
  912. IPAERR("fail to del RT tbl\n");
  913. /* commit for put */
  914. if (__ipa_commit_rt(ip))
  915. IPAERR("fail to commit RT tbl\n");
  916. }
  917. result = 0;
  918. ret:
  919. mutex_unlock(&ipa_ctx->lock);
  920. return result;
  921. }
  922. EXPORT_SYMBOL(ipa_put_rt_tbl);