/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <net/ip.h>
#include <linux/genalloc.h>	/* gen_pool_alloc() */
#include <linux/io.h>
#include <linux/ratelimit.h>
#include "ipa_i.h"

static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
					IPA_OFFSET_MEQ32_1, -1 };
static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
					IPA_OFFSET_MEQ128_1, -1 };
static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
					IPA_IHL_OFFSET_RANGE16_1, -1 };
static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
					IPA_IHL_OFFSET_MEQ32_1, -1 };
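/*
 * Pipe lookup table: rows are indexed by enum ipa_operating_mode and
 * columns by enum ipa_client_type. Each cell holds the HW pipe number
 * serving that (mode, client) pair, or -1 if the client is not
 * supported in that mode.
 */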
static const int ep_mapping[IPA_MODE_MAX][IPA_CLIENT_MAX] = {
	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
	{ -1, -1, -1, -1, -1, 11, -1, 8, 6, 2, 1, 5, -1, -1, -1, -1, -1, 10, 9, 7, 3, 4 },
	{ 11, 13, 15, 17, 19, -1, -1, 8, 6, 2, 1, 5, 10, 12, 14, 16, 18, -1, 9, 7, 3, 4 },
	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
	{ 19, -1, -1, -1, -1, 11, 15, 8, 6, 2, 1, 5, 14, 16, 17, 18, -1, 10, 9, 7, 3, 4 },
};

/**
 * ipa_cfg_route() - configure IPA route
 * @route: IPA route
 *
 * Return codes:
 * 0: success
 */
int ipa_cfg_route(struct ipa_route *route)
{
	u32 ipa_route_offset = IPA_ROUTE_OFST_v1;

	if (ipa_ctx->ipa_hw_type != IPA_HW_v1_0)
		ipa_route_offset = IPA_ROUTE_OFST_v2;

	ipa_inc_client_enable_clks();
	ipa_write_reg(ipa_ctx->mmio, ipa_route_offset,
		      IPA_SETFIELD(route->route_dis,
				IPA_ROUTE_ROUTE_DIS_SHFT,
				IPA_ROUTE_ROUTE_DIS_BMSK) |
		      IPA_SETFIELD(route->route_def_pipe,
				IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
				IPA_ROUTE_ROUTE_DEF_PIPE_BMSK) |
		      IPA_SETFIELD(route->route_def_hdr_table,
				IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
				IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK) |
		      IPA_SETFIELD(route->route_def_hdr_ofst,
				IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
				IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK));
	ipa_dec_client_disable_clks();

	return 0;
}

/**
 * ipa_cfg_filter() - configure filter
 * @disable: disable value
 *
 * Return codes:
 * 0: success
 */
int ipa_cfg_filter(u32 disable)
{
	u32 ipa_filter_ofst = IPA_FILTER_OFST_v1;

	if (ipa_ctx->ipa_hw_type != IPA_HW_v1_0)
		ipa_filter_ofst = IPA_FILTER_OFST_v2;

	ipa_inc_client_enable_clks();
	ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
			IPA_SETFIELD(!disable,
					IPA_FILTER_FILTER_EN_SHFT,
					IPA_FILTER_FILTER_EN_BMSK));
	ipa_dec_client_disable_clks();

	return 0;
}

/**
 * ipa_init_hw() - initialize HW
 *
 * Return codes:
 * 0: success
 */
int ipa_init_hw(void)
{
	u32 ipa_version = 0;

	/* do soft reset of IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);

	/* enable IPA */
	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);

	/* Read IPA version and make sure we have access to the registers */
	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
	if (ipa_version == 0)
		return -EFAULT;

	return 0;
}

/**
 * ipa_get_ep_mapping() - provide endpoint mapping
 * @mode: IPA operating mode
 * @client: client type
 *
 * Return value: endpoint mapping
 */
int ipa_get_ep_mapping(enum ipa_operating_mode mode,
		       enum ipa_client_type client)
{
	return ep_mapping[mode][client];
}
/**
 * ipa_get_client_mapping() - provide client mapping
 * @mode: IPA operating mode
 * @pipe_idx: IPA end-point number
 *
 * Return value: client mapping; IPA_CLIENT_MAX if no client is mapped
 * to pipe_idx in the given mode
 */
int ipa_get_client_mapping(enum ipa_operating_mode mode, int pipe_idx)
{
	int i;

	for (i = 0; i < IPA_CLIENT_MAX; i++)
		if (ep_mapping[mode][i] == pipe_idx)
			break;
	return i;
}
/**
 * ipa_write_32() - convert 32 bit value to byte array
 * @w: 32 bit integer
 * @dest: byte array
 *
 * Return value: pointer to the next byte after the written value
 */
u8 *ipa_write_32(u32 w, u8 *dest)
{
	*dest++ = (u8)((w) & 0xFF);
	*dest++ = (u8)((w >> 8) & 0xFF);
	*dest++ = (u8)((w >> 16) & 0xFF);
	*dest++ = (u8)((w >> 24) & 0xFF);

	return dest;
}

/**
 * ipa_write_16() - convert 16 bit value to byte array
 * @hw: 16 bit integer
 * @dest: byte array
 *
 * Return value: pointer to the next byte after the written value
 */
u8 *ipa_write_16(u16 hw, u8 *dest)
{
	*dest++ = (u8)((hw) & 0xFF);
	*dest++ = (u8)((hw >> 8) & 0xFF);

	return dest;
}

/**
 * ipa_write_8() - convert 8 bit value to byte array
 * @b: 8 bit integer
 * @dest: byte array
 *
 * Return value: pointer to the next byte after the written value
 */
u8 *ipa_write_8(u8 b, u8 *dest)
{
	*dest++ = (b) & 0xFF;

	return dest;
}

/**
 * ipa_pad_to_32() - pad byte array up to the next 32 bit boundary
 * @dest: byte array
 *
 * Return value: pointer to the first byte past the padding
 */
u8 *ipa_pad_to_32(u8 *dest)
{
	int i = (u32)dest & 0x3;
	int j;

	if (i)
		for (j = 0; j < (4 - i); j++)
			*dest++ = 0;

	return dest;
}
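/*
 * Illustrative use of the helpers above (all values made up): each
 * writer emits little-endian bytes and returns the advanced cursor,
 * so rule fields can be chained and then aligned:
 *
 *	u8 *cur = buf;
 *	cur = ipa_write_8(16, cur);		offset: 1 byte
 *	cur = ipa_write_32(0xFFFFFFFF, cur);	mask:   4 bytes
 *	cur = ipa_write_32(0x0A000001, cur);	value:  4 bytes
 *	cur = ipa_pad_to_32(cur);		zero-fill to 32-bit boundary
 */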
/**
 * ipa_generate_hw_rule() - generate HW rule
 * @ip: IP address type
 * @attrib: IPA rule attribute
 * @buf: output buffer
 * @en_rule: rule
 *
 * Return codes:
 * 0: success
 * -EPERM: wrong input
 */
int ipa_generate_hw_rule(enum ipa_ip_type ip,
		const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
{
	u8 ofst_meq32 = 0;
	u8 ihl_ofst_rng16 = 0;
	u8 ihl_ofst_meq32 = 0;
	u8 ofst_meq128 = 0;
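	/*
	 * The HW offers only two comparators of each type (the -1
	 * terminated tables at the top of this file hold two entries
	 * each), so the four counters above track how many meq32,
	 * meq128, ihl_rng16 and ihl_meq32 equations this rule has
	 * consumed; a rule needing a third instance of any type fails
	 * with the "ran out of ... eq" errors below.
	 */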
	if (ip == IPA_IP_v4) {

		/* error check */
		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
		    attrib->attrib_mask & IPA_FLT_TC ||
		    attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			IPAERR("v6 attrib's specified for v4 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_TOS) {
			*en_rule |= IPA_TOS_EQ;
			*buf = ipa_write_8(attrib->u.v4.tos, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 0 => offset of TOS in v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32((attrib->tos_mask << 16), *buf);
			*buf = ipa_write_32(attrib->tos_value, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v4.protocol, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 12 => offset of src ip in v4 header */
			*buf = ipa_write_8(12, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 16 => offset of dst ip in v4 header */
			*buf = ipa_write_8(16, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v4 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v4 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v4 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v4 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			*en_rule |= IPA_IPV4_IS_FRAG;
			*buf = ipa_pad_to_32(*buf);
		}

	} else if (ip == IPA_IP_v6) {

		/* v6 code below assumes no extension headers TODO: fix this */

		/* error check */
		if (attrib->attrib_mask & IPA_FLT_TOS ||
		    attrib->attrib_mask & IPA_FLT_PROTOCOL ||
		    attrib->attrib_mask & IPA_FLT_FRAGMENT) {
			IPAERR("v4 attrib's specified for v6 rule\n");
			return -EPERM;
		}

		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
			*en_rule |= IPA_PROTOCOL_EQ;
			*buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_TYPE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of type after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->type, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_CODE) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 1 => offset of code after v6 header */
			*buf = ipa_write_8(1, *buf);
			*buf = ipa_write_32(0xFF, *buf);
			*buf = ipa_write_32(attrib->code, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SPI) {
			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
				IPAERR("ran out of ihl_meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
			/* 0 => offset of SPI after v6 header FIXME */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32(0xFFFFFFFF, *buf);
			*buf = ipa_write_32(attrib->spi, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_meq32++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_write_16(attrib->src_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_write_16(attrib->dst_port, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->src_port_hi < attrib->src_port_lo) {
				IPAERR("bad src port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 0 => offset of src port after v6 header */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_16(attrib->src_port_hi, *buf);
			*buf = ipa_write_16(attrib->src_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
				IPAERR("ran out of ihl_rng16 eq\n");
				return -EPERM;
			}
			if (attrib->dst_port_hi < attrib->dst_port_lo) {
				IPAERR("bad dst port range param\n");
				return -EPERM;
			}
			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
			/* 2 => offset of dst port after v6 header */
			*buf = ipa_write_8(2, *buf);
			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
			*buf = ipa_pad_to_32(*buf);
			ihl_ofst_rng16++;
		}

		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 8 => offset of src ip in v6 header */
			*buf = ipa_write_8(8, *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
			if (ipa_ofst_meq128[ofst_meq128] == -1) {
				IPAERR("ran out of meq128 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq128[ofst_meq128];
			/* 24 => offset of dst ip in v6 header */
			*buf = ipa_write_8(24, *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
					*buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
			*buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq128++;
		}

		if (attrib->attrib_mask & IPA_FLT_TC) {
			*en_rule |= IPA_FLT_TC;
			*buf = ipa_write_8(attrib->u.v6.tc, *buf);
			*buf = ipa_pad_to_32(*buf);
		}
		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
			if (ipa_ofst_meq32[ofst_meq32] == -1) {
				IPAERR("ran out of meq32 eq\n");
				return -EPERM;
			}
			*en_rule |= ipa_ofst_meq32[ofst_meq32];
			/* 0 => offset of the first word of the v6 header;
			 * TC sits in bits 27..20 of it, hence the shift
			 */
			*buf = ipa_write_8(0, *buf);
			*buf = ipa_write_32((attrib->tos_mask << 20), *buf);
			*buf = ipa_write_32(attrib->tos_value, *buf);
			*buf = ipa_pad_to_32(*buf);
			ofst_meq32++;
		}
		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
			*en_rule |= IPA_FLT_FLOW_LABEL;
			/* FIXME FL is only 20 bits */
			*buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
			*en_rule |= IPA_METADATA_COMPARE;
			*buf = ipa_write_8(0, *buf);	/* offset, reserved */
			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
			*buf = ipa_write_32(attrib->meta_data, *buf);
			*buf = ipa_pad_to_32(*buf);
		}

	} else {
		IPAERR("unsupported ip %d\n", ip);
		return -EPERM;
	}

	/*
	 * default "rule" means no attributes set -> map to
	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
	 */
	if (attrib->attrib_mask == 0) {
		if (ipa_ofst_meq32[ofst_meq32] == -1) {
			IPAERR("ran out of meq32 eq\n");
			return -EPERM;
		}
		*en_rule |= ipa_ofst_meq32[ofst_meq32];
		*buf = ipa_write_8(0, *buf);	/* offset */
		*buf = ipa_write_32(0, *buf);	/* mask */
		*buf = ipa_write_32(0, *buf);	/* val */
		*buf = ipa_pad_to_32(*buf);
		ofst_meq32++;
	}

	return 0;
}
/**
 * ipa_cfg_ep - IPA end-point configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * This includes nat, header, mode, aggregation and route settings and is a one
 * shot API to configure the IPA end-point fully
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
{
	int result = -EINVAL;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
	    ipa_ep_cfg == NULL) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	result = ipa_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
	if (result)
		return result;

	result = ipa_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
	if (result)
		return result;

	if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
		result = ipa_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
		if (result)
			return result;

		result = ipa_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
		if (result)
			return result;

		result = ipa_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
		if (result)
			return result;
	}

	return 0;
}
EXPORT_SYMBOL(ipa_cfg_ep);

/**
 * ipa_cfg_ep_nat() - IPA end-point NAT configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg)
{
	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
	    ipa_ep_cfg == NULL) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
		IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
		return -EINVAL;
	}

	/* copy over EP cfg */
	ipa_ctx->ep[clnt_hdl].cfg.nat = *ipa_ep_cfg;

	ipa_inc_client_enable_clks();
	/* clnt_hdl is used as pipe_index */
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_NAT_n_OFST_v1(clnt_hdl),
			IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
				IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
				IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
	else
		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_NAT_n_OFST_v2(clnt_hdl),
			IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.nat.nat_en,
				IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
				IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK));
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_cfg_ep_nat);

/**
 * ipa_cfg_ep_hdr() - IPA end-point header configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
{
	u32 val;
	struct ipa_ep_context *ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
	    ipa_ep_cfg == NULL) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	/* copy over EP cfg */
	ep->cfg.hdr = *ipa_ep_cfg;

	val = IPA_SETFIELD(ep->cfg.hdr.hdr_len,
			IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
			IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK) |
	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata_valid,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK) |
	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_metadata,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK) |
	      IPA_SETFIELD(ep->cfg.hdr.hdr_additional_const_len,
			IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
			IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK) |
	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size_valid,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK) |
	      IPA_SETFIELD(ep->cfg.hdr.hdr_ofst_pkt_size,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
			IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK) |
	      IPA_SETFIELD(ep->cfg.hdr.hdr_a5_mux,
			IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
			IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);

	ipa_inc_client_enable_clks();
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
		ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_HDR_n_OFST_v1(clnt_hdl), val);
	else
		ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_HDR_n_OFST_v2(clnt_hdl), val);
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_cfg_ep_hdr);
/**
 * ipa_cfg_ep_mode() - IPA end-point mode configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg)
{
	u32 val;
	int ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
	    ipa_ep_cfg == NULL) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
		IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
		return -EINVAL;
	}

	ep = ipa_get_ep_mapping(ipa_ctx->mode, ipa_ep_cfg->dst);
	if (ep == -1 && ipa_ep_cfg->mode == IPA_DMA) {
		IPAERR("dst %d does not exist in mode %d\n", ipa_ep_cfg->dst,
		       ipa_ctx->mode);
		return -EINVAL;
	}

	/* copy over EP cfg */
	ipa_ctx->ep[clnt_hdl].cfg.mode = *ipa_ep_cfg;
	ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep;

	val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.mode.mode,
			IPA_ENDP_INIT_MODE_n_MODE_SHFT,
			IPA_ENDP_INIT_MODE_n_MODE_BMSK) |
	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].dst_pipe_index,
			IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
			IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);

	ipa_inc_client_enable_clks();
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
		ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_MODE_n_OFST_v1(clnt_hdl), val);
	else
		ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_MODE_n_OFST_v2(clnt_hdl), val);
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_cfg_ep_mode);

/**
 * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
{
	u32 val;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
	    ipa_ep_cfg == NULL) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	/* copy over EP cfg */
	ipa_ctx->ep[clnt_hdl].cfg.aggr = *ipa_ep_cfg;

	val = IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_en,
			IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
			IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) |
	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr,
			IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
			IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) |
	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_byte_limit,
			IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
			IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) |
	      IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].cfg.aggr.aggr_time_limit,
			IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
			IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);

	ipa_inc_client_enable_clks();
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
		ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_AGGR_n_OFST_v1(clnt_hdl), val);
	else
		ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_AGGR_n_OFST_v2(clnt_hdl), val);
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_cfg_ep_aggr);

/**
 * ipa_cfg_ep_route() - IPA end-point routing configuration
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 *
 * Note: Should not be called from atomic context
 */
int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg)
{
	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
	    ipa_ep_cfg == NULL) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
		IPAERR("ROUTE does not apply to IPA out EP %d\n", clnt_hdl);
		return -EINVAL;
	}

	/*
	 * if DMA mode was configured previously for this EP, return with
	 * success
	 */
	if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
		IPADBG("DMA mode for EP %d\n", clnt_hdl);
		return 0;
	}

	if (ipa_ep_cfg->rt_tbl_hdl)
		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");

	/* always use the "default" routing tables whose indices are 0 */
	ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;

	ipa_inc_client_enable_clks();
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_ROUTE_n_OFST_v1(clnt_hdl),
			IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].rt_tbl_idx,
				IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
				IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
	} else {
		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_ROUTE_n_OFST_v2(clnt_hdl),
			IPA_SETFIELD(ipa_ctx->ep[clnt_hdl].rt_tbl_idx,
				IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
				IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK));
	}
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_cfg_ep_route);
/**
 * ipa_cfg_ep_holb() - IPA end-point HOLB configuration
 *
 * If an IPA producer pipe is full, IPA HW by default will block
 * indefinitely till space opens up. During this time no packets
 * including those from unrelated pipes will be processed. Enabling
 * HOLB means IPA HW will be allowed to drop packets as/when needed
 * and indefinite blocking is avoided.
 *
 * @clnt_hdl: [in] opaque client handle assigned by IPA to client
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg)
{
	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0 ||
	    ipa_ep_cfg == NULL || ipa_ep_cfg->tmr_val > 511 ||
	    ipa_ep_cfg->en > 1) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
		IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
		return -EINVAL;
	}

	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
		IPAERR("per EP HOLB not supported\n");
		return -EPERM;
	} else {
		ipa_ctx->ep[clnt_hdl].holb = *ipa_ep_cfg;
		ipa_inc_client_enable_clks();
		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HOL_BLOCK_EN_n_OFST(clnt_hdl),
			ipa_ep_cfg->en);
		ipa_write_reg(ipa_ctx->mmio,
			IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_OFST(clnt_hdl),
			ipa_ep_cfg->tmr_val);
		ipa_dec_client_disable_clks();
		IPAERR("cfg holb %u ep=%d tmr=%d\n", ipa_ep_cfg->en, clnt_hdl,
				ipa_ep_cfg->tmr_val);
	}

	return 0;
}
EXPORT_SYMBOL(ipa_cfg_ep_holb);
/**
 * ipa_cfg_ep_holb_by_client() - IPA end-point HOLB configuration
 *
 * Wrapper for ipa_cfg_ep_holb() that takes a client name instead of a
 * client handle. It is intended for clients that do not have a client
 * handle.
 *
 * @client: [in] client name
 * @ipa_ep_cfg: [in] IPA end-point configuration params
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
			const struct ipa_ep_cfg_holb *ipa_ep_cfg)
{
	return ipa_cfg_ep_holb(ipa_get_ep_mapping(ipa_ctx->mode, client),
			       ipa_ep_cfg);
}
EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client);
/**
 * ipa_dump_buff_internal() - dumps buffer for debug purposes
 * @base: buffer base address
 * @phy_base: buffer physical base address
 * @size: size of the buffer
 */
void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
{
	int i;
	u32 *cur = (u32 *)base;
	u8 *byt;

	IPADBG("START phys=%x\n", phy_base);
	for (i = 0; i < size / 4; i++) {
		byt = (u8 *)(cur + i);
		IPADBG("%2d %08x %02x %02x %02x %02x\n", i, *(cur + i),
				byt[0], byt[1], byt[2], byt[3]);
	}
	IPADBG("END\n");
}

/**
 * ipa_dump() - dumps part of driver data structures for debug purposes
 */
void ipa_dump(void)
{
	struct ipa_mem_buffer hdr_mem = { 0 };
	struct ipa_mem_buffer rt_mem = { 0 };
	struct ipa_mem_buffer flt_mem = { 0 };

	mutex_lock(&ipa_ctx->lock);

	if (ipa_generate_hdr_hw_tbl(&hdr_mem))
		IPAERR("fail\n");
	if (ipa_generate_rt_hw_tbl(IPA_IP_v4, &rt_mem))
		IPAERR("fail\n");
	if (ipa_generate_flt_hw_tbl(IPA_IP_v4, &flt_mem))
		IPAERR("fail\n");

	IPAERR("PHY hdr=%x rt=%x flt=%x\n", hdr_mem.phys_base,
			rt_mem.phys_base, flt_mem.phys_base);
	IPAERR("VIRT hdr=%x rt=%x flt=%x\n", (u32)hdr_mem.base,
			(u32)rt_mem.base, (u32)flt_mem.base);
	IPAERR("SIZE hdr=%d rt=%d flt=%d\n", hdr_mem.size, rt_mem.size,
			flt_mem.size);

	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
	IPA_DUMP_BUFF(rt_mem.base, rt_mem.phys_base, rt_mem.size);
	IPA_DUMP_BUFF(flt_mem.base, flt_mem.phys_base, flt_mem.size);

	if (hdr_mem.phys_base)
		dma_free_coherent(NULL, hdr_mem.size, hdr_mem.base,
				hdr_mem.phys_base);
	if (rt_mem.phys_base)
		dma_free_coherent(NULL, rt_mem.size, rt_mem.base,
				rt_mem.phys_base);
	if (flt_mem.phys_base)
		dma_free_coherent(NULL, flt_mem.size, flt_mem.base,
				flt_mem.phys_base);

	mutex_unlock(&ipa_ctx->lock);
}

/**
 * ipa_search() - search for handle in RB tree
 * @root: tree root
 * @hdl: handle
 *
 * Return value: tree node corresponding to the handle
 */
struct ipa_tree_node *ipa_search(struct rb_root *root, u32 hdl)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct ipa_tree_node *data = container_of(node,
				struct ipa_tree_node, node);

		if (hdl < data->hdl)
			node = node->rb_left;
		else if (hdl > data->hdl)
			node = node->rb_right;
		else
			return data;
	}

	return NULL;
}

/**
 * ipa_insert() - insert new node to RB tree
 * @root: tree root
 * @data: new data to insert
 *
 * Return value:
 * 0: success
 * -EPERM: tree already contains the node with provided handle
 */
int ipa_insert(struct rb_root *root, struct ipa_tree_node *data)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct ipa_tree_node *this = container_of(*new,
				struct ipa_tree_node, node);

		parent = *new;
		if (data->hdl < this->hdl)
			new = &((*new)->rb_left);
		else if (data->hdl > this->hdl)
			new = &((*new)->rb_right);
		else
			return -EPERM;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return 0;
}
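/*
 * Illustrative use of the pair above (tree and handle names are
 * hypothetical):
 *
 *	node->hdl = hdl;
 *	if (ipa_insert(&some_hdl_tree, node))
 *		return -EPERM;			duplicate handle
 *	entry = ipa_search(&some_hdl_tree, hdl);
 */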
/**
 * ipa_pipe_mem_init() - initialize the pipe memory
 * @start_ofst: start offset
 * @size: size
 *
 * Return value:
 * 0: success
 * -ENOMEM: no memory
 */
int ipa_pipe_mem_init(u32 start_ofst, u32 size)
{
	int res;
	u32 aligned_start_ofst;
	u32 aligned_size;
	struct gen_pool *pool;

	if (!size) {
		IPAERR("no IPA pipe memory allotted\n");
		goto fail;
	}

	aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
	aligned_size = size - (aligned_start_ofst - start_ofst);

	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
	       start_ofst, aligned_start_ofst, size, aligned_size);

	/* allocation order of 8, i.e. 256 byte minimum chunks; global pool */
	pool = gen_pool_create(8, -1);
	if (!pool) {
		IPAERR("Failed to create a new memory pool.\n");
		goto fail;
	}

	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
	if (res) {
		IPAERR("Failed to add memory to IPA pipe pool\n");
		goto err_pool_add;
	}

	ipa_ctx->pipe_mem_pool = pool;
	return 0;

err_pool_add:
	gen_pool_destroy(pool);
fail:
	return -ENOMEM;
}
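/*
 * Note: the "addresses" managed by the pool created above are byte
 * offsets into IPA pipe memory, not kernel virtual addresses; gen_pool
 * is used here purely as an offset allocator.
 */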
/**
 * ipa_pipe_mem_alloc() - allocate pipe memory
 * @ofst: offset
 * @size: size
 *
 * Return value:
 * 0: success
 * -1: failure
 */
int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
{
	u32 vaddr;
	int res = -1;

	if (!ipa_ctx->pipe_mem_pool || !size) {
		IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
				ipa_ctx->pipe_mem_pool);
		return res;
	}

	vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);

	if (vaddr) {
		*ofst = vaddr;
		res = 0;
		IPADBG("size=%u ofst=%u\n", size, vaddr);
	} else {
		IPAERR("size=%u failed\n", size);
	}

	return res;
}

/**
 * ipa_pipe_mem_free() - free pipe memory
 * @ofst: offset
 * @size: size
 *
 * Return value:
 * 0: success
 */
int ipa_pipe_mem_free(u32 ofst, u32 size)
{
	IPADBG("size=%u ofst=%u\n", size, ofst);

	if (ipa_ctx->pipe_mem_pool && size)
		gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);

	return 0;
}
/**
 * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
 * @mode: [in] the desired aggregation mode, e.g. straight MBIM or QCNCM
 *
 * Returns: 0 on success
 */
int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
{
	u32 reg_val;

	ipa_inc_client_enable_clks();
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
		reg_val = ipa_read_reg(ipa_ctx->mmio,
				IPA_AGGREGATION_SPARE_REG_2_OFST);
		ipa_write_reg(ipa_ctx->mmio,
				IPA_AGGREGATION_SPARE_REG_2_OFST,
				((mode & IPA_AGGREGATION_MODE_MSK) <<
					IPA_AGGREGATION_MODE_SHFT) |
				(reg_val & IPA_AGGREGATION_MODE_BMSK));
	} else {
		reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
		ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
				(reg_val & 0xfffffffe));
	}
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_set_aggr_mode);
/**
 * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
 * mode
 * @sig: [in] the first 3 bytes of QCNCM NDP signature (expected to be
 * "QND")
 *
 * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
 * (expected to be 'P') needs to be set using the header addition mechanism.
 *
 * Returns: 0 on success, negative on failure
 */
int ipa_set_qcncm_ndp_sig(char sig[3])
{
	u32 reg_val;

	if (sig == NULL) {
		IPAERR("bad argument for ipa_set_qcncm_ndp_sig\n");
		return -EINVAL;
	}

	ipa_inc_client_enable_clks();
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
		reg_val = ipa_read_reg(ipa_ctx->mmio,
				IPA_AGGREGATION_SPARE_REG_2_OFST);
		ipa_write_reg(ipa_ctx->mmio,
				IPA_AGGREGATION_SPARE_REG_2_OFST, sig[0] <<
				IPA_AGGREGATION_QCNCM_SIG0_SHFT |
				(sig[1] << IPA_AGGREGATION_QCNCM_SIG1_SHFT) |
				sig[2] |
				(reg_val & IPA_AGGREGATION_QCNCM_SIG_BMSK));
	} else {
		reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
		ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
				(sig[1] << 12) | (sig[2] << 4) |
				(reg_val & 0xf000000f));
	}
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
/**
 * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
 * configuration
 * @enable: [in] true for single NDP/MBIM; false otherwise
 *
 * Returns: 0 on success
 */
int ipa_set_single_ndp_per_mbim(bool enable)
{
	u32 reg_val;

	ipa_inc_client_enable_clks();
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
		reg_val = ipa_read_reg(ipa_ctx->mmio,
				IPA_AGGREGATION_SPARE_REG_1_OFST);
		ipa_write_reg(ipa_ctx->mmio,
				IPA_AGGREGATION_SPARE_REG_1_OFST, (enable &
				IPA_AGGREGATION_SINGLE_NDP_MSK) |
				(reg_val & IPA_AGGREGATION_SINGLE_NDP_BMSK));
	} else {
		reg_val = ipa_read_reg(ipa_ctx->mmio,
				IPA_SINGLE_NDP_MODE_OFST);
		ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
				(enable & 0x1) | (reg_val & 0xfffffffe));
	}
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);

/**
 * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix
 * for MBIM aggregation.
 * @enable: [in] true to enable the HW fix; false otherwise
 *
 * Returns: 0 on success
 */
int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
{
	u32 reg_val;

	ipa_inc_client_enable_clks();
	reg_val = ipa_read_reg(ipa_ctx->mmio,
			IPA_AGGREGATION_SPARE_REG_1_OFST);
	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
		(enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
		(reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
	ipa_dec_client_disable_clks();

	return 0;
}
EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
/**
 * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
 * @start: start address of the memory buffer
 * @end: end address of the memory buffer
 * @boundary: boundary
 *
 * Return value:
 * 1: if the interval [start, end] straddles boundary
 * 0: otherwise
 */
int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
{
	u32 next_start;
	u32 prev_end;

	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);

	next_start = (start + (boundary - 1)) & ~(boundary - 1);
	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;

	while (next_start < prev_end)
		next_start += boundary;

	if (next_start == prev_end)
		return 1;
	else
		return 0;
}
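/*
 * Worked example for ipa_straddle_boundary() with boundary = 0x800:
 * for [0x700, 0x900], next_start rounds up to 0x800 and prev_end lands
 * on 0x800 as well, so the buffer crosses the 0x800 line and 1 is
 * returned; for [0x100, 0x300], next_start is 0x800 but prev_end is 0,
 * so 0 is returned.
 */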
/**
 * ipa_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
 *
 * Function is rate limited to avoid flooding kernel log buffer
 */
void ipa_bam_reg_dump(void)
{
	static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);

	if (__ratelimit(&_rs)) {
		ipa_inc_client_enable_clks();
		pr_err("IPA BAM START\n");
		sps_get_bam_debug_info(ipa_ctx->bam_handle, 5, 479182, 0, 0);
		sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0, 0, 0);
		ipa_dec_client_disable_clks();
	}
}
EXPORT_SYMBOL(ipa_bam_reg_dump);