ipa.c 58 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341
  1. /* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/clk.h>
  13. #include <linux/device.h>
  14. #include <linux/dmapool.h>
  15. #include <linux/fs.h>
  16. #include <linux/genalloc.h>
  17. #include <linux/init.h>
  18. #include <linux/kernel.h>
  19. #include <linux/mm.h>
  20. #include <linux/module.h>
  21. #include <linux/of.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/rbtree.h>
  24. #include <linux/uaccess.h>
  25. #include <mach/msm_bus.h>
  26. #include <mach/msm_bus_board.h>
  27. #include "ipa_i.h"
  28. #include "ipa_rm_i.h"
  29. #define IPA_SUMMING_THRESHOLD (0x10)
  30. #define IPA_PIPE_MEM_START_OFST (0x0)
  31. #define IPA_PIPE_MEM_SIZE (0x0)
  32. #define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
  33. x == IPA_MODE_MOBILE_AP_WAN || \
  34. x == IPA_MODE_MOBILE_AP_WLAN)
  35. #define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
  36. #define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
  37. #define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL)
  38. #define IPA_DEFAULT_HEADER_LENGTH (8)
  39. #define IPA_DMA_POOL_SIZE (512)
  40. #define IPA_DMA_POOL_ALIGNMENT (4)
  41. #define IPA_DMA_POOL_BOUNDARY (1024)
  42. #define IPA_NUM_DESC_PER_SW_TX (2)
  43. #define IPA_ROUTING_RULE_BYTE_SIZE (4)
  44. #define IPA_BAM_CNFG_BITS_VAL (0x7FFFE004)
  45. #define IPA_AGGR_MAX_STR_LENGTH (10)
  46. #define IPA_AGGR_STR_IN_BYTES(str) \
  47. (strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
/* Platform resources (memory ranges, IRQs, HW type) filled in at probe time */
static struct ipa_plat_drv_res ipa_res = {0, };

/* Device-tree match table: binds this driver to "qcom,ipa" nodes */
static struct of_device_id ipa_plat_drv_match[] = {
	{
		.compatible = "qcom,ipa",
	},
	{
	}
};

/* Clock handles obtained from the clock framework during init */
static struct clk *ipa_clk_src;
static struct clk *ipa_clk;
static struct clk *sys_noc_ipa_axi_clk;
static struct clk *ipa_cnoc_clk;
static struct clk *ipa_inactivity_clk;

/* Bus bandwidth votes for the idle use case (no bandwidth requested) */
static struct msm_bus_vectors ipa_init_vectors[] = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 0,
		.ib = 0,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 0,
		.ib = 0,
	},
};

/* Bus bandwidth votes for the maximum-performance use case (bytes/sec) */
static struct msm_bus_vectors ipa_max_perf_vectors[] = {
	{
		.src = MSM_BUS_MASTER_IPA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_EBI_CH0,
		.ab = 50000000,
		.ib = 960000000,
	},
	{
		.src = MSM_BUS_MASTER_BAM_DMA,
		.dst = MSM_BUS_SLAVE_OCIMEM,
		.ab = 50000000,
		.ib = 960000000,
	},
};

/* Use case 0 = idle, use case 1 = max perf; selected via msm_bus_scale */
static struct msm_bus_paths ipa_usecases[] = {
	{
		ARRAY_SIZE(ipa_init_vectors),
		ipa_init_vectors,
	},
	{
		ARRAY_SIZE(ipa_max_perf_vectors),
		ipa_max_perf_vectors,
	},
};

/* Client registration data for the bus-scaling driver */
static struct msm_bus_scale_pdata ipa_bus_client_pdata = {
	ipa_usecases,
	ARRAY_SIZE(ipa_usecases),
	.name = "ipa",
};

/* Handle returned by msm_bus_scale_register_client() */
static uint32_t ipa_bus_hdl;

static struct device *ipa_dev;

/* Global driver context, shared with the other ipa_* translation units */
struct ipa_context *ipa_ctx;

/* Module parameters controlling polling vs. interrupt operation */
static bool polling_mode;
module_param(polling_mode, bool, 0644);
MODULE_PARM_DESC(polling_mode,
	"1 - pure polling mode; 0 - interrupt+polling mode");
static uint polling_delay_ms = 50;
module_param(polling_delay_ms, uint, 0644);
MODULE_PARM_DESC(polling_delay_ms, "set to desired delay between polls");

/* Table-placement knobs: 1 = table lives in local (IPA) memory, 0 = DDR */
static bool hdr_tbl_lcl = 1;
module_param(hdr_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(hdr_tbl_lcl, "where hdr tbl resides 1-local; 0-system");
static bool ip4_rt_tbl_lcl;
module_param(ip4_rt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip4_rt_tbl_lcl,
	"where ip4 rt tables reside 1-local; 0-system");
static bool ip6_rt_tbl_lcl;
module_param(ip6_rt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip6_rt_tbl_lcl,
	"where ip6 rt tables reside 1-local; 0-system");
static bool ip4_flt_tbl_lcl = 1;
module_param(ip4_flt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip4_flt_tbl_lcl,
	"where ip4 flt tables reside 1-local; 0-system");
static bool ip6_flt_tbl_lcl = 1;
module_param(ip6_flt_tbl_lcl, bool, 0644);
MODULE_PARM_DESC(ip6_flt_tbl_lcl,
	"where ip6 flt tables reside 1-local; 0-system");

/* Forward declarations for helpers defined later in this file */
static int ipa_load_pipe_connection(struct platform_device *pdev,
	enum a2_mux_pipe_direction pipe_dir,
	struct a2_mux_pipe_connection *pdata);
static int ipa_update_connections_info(struct device_node *node,
	struct a2_mux_pipe_connection *pipe_connection);
static void ipa_set_aggregation_params(void);
  151. static int ipa_open(struct inode *inode, struct file *filp)
  152. {
  153. struct ipa_context *ctx = NULL;
  154. IPADBG("ENTER\n");
  155. ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
  156. filp->private_data = ctx;
  157. return 0;
  158. }
  159. static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  160. {
  161. int retval = 0;
  162. u32 pyld_sz;
  163. u8 header[128] = { 0 };
  164. u8 *param = NULL;
  165. struct ipa_ioc_nat_alloc_mem nat_mem;
  166. struct ipa_ioc_v4_nat_init nat_init;
  167. struct ipa_ioc_v4_nat_del nat_del;
  168. struct ipa_ioc_rm_dependency rm_depend;
  169. size_t sz;
  170. int pre_entry;
  171. IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
  172. if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
  173. return -ENOTTY;
  174. if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
  175. return -ENOTTY;
  176. ipa_inc_client_enable_clks();
  177. switch (cmd) {
  178. case IPA_IOC_ALLOC_NAT_MEM:
  179. if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
  180. sizeof(struct ipa_ioc_nat_alloc_mem))) {
  181. retval = -EFAULT;
  182. break;
  183. }
  184. /* null terminate the string */
  185. nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
  186. if (allocate_nat_device(&nat_mem)) {
  187. retval = -EFAULT;
  188. break;
  189. }
  190. if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
  191. sizeof(struct ipa_ioc_nat_alloc_mem))) {
  192. retval = -EFAULT;
  193. break;
  194. }
  195. break;
  196. case IPA_IOC_V4_INIT_NAT:
  197. if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
  198. sizeof(struct ipa_ioc_v4_nat_init))) {
  199. retval = -EFAULT;
  200. break;
  201. }
  202. if (ipa_nat_init_cmd(&nat_init)) {
  203. retval = -EFAULT;
  204. break;
  205. }
  206. break;
  207. case IPA_IOC_NAT_DMA:
  208. if (copy_from_user(header, (u8 *)arg,
  209. sizeof(struct ipa_ioc_nat_dma_cmd))) {
  210. retval = -EFAULT;
  211. break;
  212. }
  213. pre_entry =
  214. ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
  215. pyld_sz =
  216. sizeof(struct ipa_ioc_nat_dma_cmd) +
  217. pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
  218. param = kzalloc(pyld_sz, GFP_KERNEL);
  219. if (!param) {
  220. retval = -ENOMEM;
  221. break;
  222. }
  223. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  224. retval = -EFAULT;
  225. break;
  226. }
  227. /* add check in case user-space module compromised */
  228. if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
  229. != pre_entry)) {
  230. IPAERR("current %d pre %d\n",
  231. ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
  232. pre_entry);
  233. retval = -EFAULT;
  234. break;
  235. }
  236. if (ipa_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
  237. retval = -EFAULT;
  238. break;
  239. }
  240. break;
  241. case IPA_IOC_V4_DEL_NAT:
  242. if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
  243. sizeof(struct ipa_ioc_v4_nat_del))) {
  244. retval = -EFAULT;
  245. break;
  246. }
  247. if (ipa_nat_del_cmd(&nat_del)) {
  248. retval = -EFAULT;
  249. break;
  250. }
  251. break;
  252. case IPA_IOC_ADD_HDR:
  253. if (copy_from_user(header, (u8 *)arg,
  254. sizeof(struct ipa_ioc_add_hdr))) {
  255. retval = -EFAULT;
  256. break;
  257. }
  258. pre_entry =
  259. ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
  260. pyld_sz =
  261. sizeof(struct ipa_ioc_add_hdr) +
  262. pre_entry * sizeof(struct ipa_hdr_add);
  263. param = kzalloc(pyld_sz, GFP_KERNEL);
  264. if (!param) {
  265. retval = -ENOMEM;
  266. break;
  267. }
  268. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  269. retval = -EFAULT;
  270. break;
  271. }
  272. /* add check in case user-space module compromised */
  273. if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
  274. != pre_entry)) {
  275. IPAERR("current %d pre %d\n",
  276. ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
  277. pre_entry);
  278. retval = -EFAULT;
  279. break;
  280. }
  281. if (ipa_add_hdr((struct ipa_ioc_add_hdr *)param)) {
  282. retval = -EFAULT;
  283. break;
  284. }
  285. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  286. retval = -EFAULT;
  287. break;
  288. }
  289. break;
  290. case IPA_IOC_DEL_HDR:
  291. if (copy_from_user(header, (u8 *)arg,
  292. sizeof(struct ipa_ioc_del_hdr))) {
  293. retval = -EFAULT;
  294. break;
  295. }
  296. pre_entry =
  297. ((struct ipa_ioc_del_hdr *)header)->num_hdls;
  298. pyld_sz =
  299. sizeof(struct ipa_ioc_del_hdr) +
  300. pre_entry * sizeof(struct ipa_hdr_del);
  301. param = kzalloc(pyld_sz, GFP_KERNEL);
  302. if (!param) {
  303. retval = -ENOMEM;
  304. break;
  305. }
  306. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  307. retval = -EFAULT;
  308. break;
  309. }
  310. /* add check in case user-space module compromised */
  311. if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
  312. != pre_entry)) {
  313. IPAERR("current %d pre %d\n",
  314. ((struct ipa_ioc_del_hdr *)param)->num_hdls,
  315. pre_entry);
  316. retval = -EFAULT;
  317. break;
  318. }
  319. if (ipa_del_hdr_by_user((struct ipa_ioc_del_hdr *)param,
  320. true)) {
  321. retval = -EFAULT;
  322. break;
  323. }
  324. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  325. retval = -EFAULT;
  326. break;
  327. }
  328. break;
  329. case IPA_IOC_ADD_RT_RULE:
  330. if (copy_from_user(header, (u8 *)arg,
  331. sizeof(struct ipa_ioc_add_rt_rule))) {
  332. retval = -EFAULT;
  333. break;
  334. }
  335. pre_entry =
  336. ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
  337. pyld_sz =
  338. sizeof(struct ipa_ioc_add_rt_rule) +
  339. pre_entry * sizeof(struct ipa_rt_rule_add);
  340. param = kzalloc(pyld_sz, GFP_KERNEL);
  341. if (!param) {
  342. retval = -ENOMEM;
  343. break;
  344. }
  345. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  346. retval = -EFAULT;
  347. break;
  348. }
  349. /* add check in case user-space module compromised */
  350. if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
  351. != pre_entry)) {
  352. IPAERR("current %d pre %d\n",
  353. ((struct ipa_ioc_add_rt_rule *)param)->num_rules,
  354. pre_entry);
  355. retval = -EFAULT;
  356. break;
  357. }
  358. if (ipa_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
  359. retval = -EFAULT;
  360. break;
  361. }
  362. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  363. retval = -EFAULT;
  364. break;
  365. }
  366. break;
  367. case IPA_IOC_DEL_RT_RULE:
  368. if (copy_from_user(header, (u8 *)arg,
  369. sizeof(struct ipa_ioc_del_rt_rule))) {
  370. retval = -EFAULT;
  371. break;
  372. }
  373. pre_entry =
  374. ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
  375. pyld_sz =
  376. sizeof(struct ipa_ioc_del_rt_rule) +
  377. pre_entry * sizeof(struct ipa_rt_rule_del);
  378. param = kzalloc(pyld_sz, GFP_KERNEL);
  379. if (!param) {
  380. retval = -ENOMEM;
  381. break;
  382. }
  383. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  384. retval = -EFAULT;
  385. break;
  386. }
  387. /* add check in case user-space module compromised */
  388. if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
  389. != pre_entry)) {
  390. IPAERR("current %d pre %d\n",
  391. ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
  392. pre_entry);
  393. retval = -EFAULT;
  394. break;
  395. }
  396. if (ipa_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
  397. retval = -EFAULT;
  398. break;
  399. }
  400. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  401. retval = -EFAULT;
  402. break;
  403. }
  404. break;
  405. case IPA_IOC_ADD_FLT_RULE:
  406. if (copy_from_user(header, (u8 *)arg,
  407. sizeof(struct ipa_ioc_add_flt_rule))) {
  408. retval = -EFAULT;
  409. break;
  410. }
  411. pre_entry =
  412. ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
  413. pyld_sz =
  414. sizeof(struct ipa_ioc_add_flt_rule) +
  415. pre_entry * sizeof(struct ipa_flt_rule_add);
  416. param = kzalloc(pyld_sz, GFP_KERNEL);
  417. if (!param) {
  418. retval = -ENOMEM;
  419. break;
  420. }
  421. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  422. retval = -EFAULT;
  423. break;
  424. }
  425. /* add check in case user-space module compromised */
  426. if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
  427. != pre_entry)) {
  428. IPAERR("current %d pre %d\n",
  429. ((struct ipa_ioc_add_flt_rule *)param)->num_rules,
  430. pre_entry);
  431. retval = -EFAULT;
  432. break;
  433. }
  434. if (ipa_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
  435. retval = -EFAULT;
  436. break;
  437. }
  438. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  439. retval = -EFAULT;
  440. break;
  441. }
  442. break;
  443. case IPA_IOC_DEL_FLT_RULE:
  444. if (copy_from_user(header, (u8 *)arg,
  445. sizeof(struct ipa_ioc_del_flt_rule))) {
  446. retval = -EFAULT;
  447. break;
  448. }
  449. pre_entry =
  450. ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
  451. pyld_sz =
  452. sizeof(struct ipa_ioc_del_flt_rule) +
  453. pre_entry * sizeof(struct ipa_flt_rule_del);
  454. param = kzalloc(pyld_sz, GFP_KERNEL);
  455. if (!param) {
  456. retval = -ENOMEM;
  457. break;
  458. }
  459. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  460. retval = -EFAULT;
  461. break;
  462. }
  463. /* add check in case user-space module compromised */
  464. if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
  465. != pre_entry)) {
  466. IPAERR("current %d pre %d\n",
  467. ((struct ipa_ioc_del_flt_rule *)param)->num_hdls,
  468. pre_entry);
  469. retval = -EFAULT;
  470. break;
  471. }
  472. if (ipa_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
  473. retval = -EFAULT;
  474. break;
  475. }
  476. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  477. retval = -EFAULT;
  478. break;
  479. }
  480. break;
  481. case IPA_IOC_COMMIT_HDR:
  482. retval = ipa_commit_hdr();
  483. break;
  484. case IPA_IOC_RESET_HDR:
  485. retval = ipa_reset_hdr();
  486. break;
  487. case IPA_IOC_COMMIT_RT:
  488. retval = ipa_commit_rt(arg);
  489. break;
  490. case IPA_IOC_RESET_RT:
  491. retval = ipa_reset_rt(arg);
  492. break;
  493. case IPA_IOC_COMMIT_FLT:
  494. retval = ipa_commit_flt(arg);
  495. break;
  496. case IPA_IOC_RESET_FLT:
  497. retval = ipa_reset_flt(arg);
  498. break;
  499. case IPA_IOC_DUMP:
  500. ipa_dump();
  501. break;
  502. case IPA_IOC_GET_RT_TBL:
  503. if (copy_from_user(header, (u8 *)arg,
  504. sizeof(struct ipa_ioc_get_rt_tbl))) {
  505. retval = -EFAULT;
  506. break;
  507. }
  508. if (ipa_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
  509. retval = -EFAULT;
  510. break;
  511. }
  512. if (copy_to_user((u8 *)arg, header,
  513. sizeof(struct ipa_ioc_get_rt_tbl))) {
  514. retval = -EFAULT;
  515. break;
  516. }
  517. break;
  518. case IPA_IOC_PUT_RT_TBL:
  519. retval = ipa_put_rt_tbl(arg);
  520. break;
  521. case IPA_IOC_GET_HDR:
  522. if (copy_from_user(header, (u8 *)arg,
  523. sizeof(struct ipa_ioc_get_hdr))) {
  524. retval = -EFAULT;
  525. break;
  526. }
  527. if (ipa_get_hdr((struct ipa_ioc_get_hdr *)header)) {
  528. retval = -EFAULT;
  529. break;
  530. }
  531. if (copy_to_user((u8 *)arg, header,
  532. sizeof(struct ipa_ioc_get_hdr))) {
  533. retval = -EFAULT;
  534. break;
  535. }
  536. break;
  537. case IPA_IOC_PUT_HDR:
  538. retval = ipa_put_hdr(arg);
  539. break;
  540. case IPA_IOC_SET_FLT:
  541. retval = ipa_cfg_filter(arg);
  542. break;
  543. case IPA_IOC_COPY_HDR:
  544. if (copy_from_user(header, (u8 *)arg,
  545. sizeof(struct ipa_ioc_copy_hdr))) {
  546. retval = -EFAULT;
  547. break;
  548. }
  549. if (ipa_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
  550. retval = -EFAULT;
  551. break;
  552. }
  553. if (copy_to_user((u8 *)arg, header,
  554. sizeof(struct ipa_ioc_copy_hdr))) {
  555. retval = -EFAULT;
  556. break;
  557. }
  558. break;
  559. case IPA_IOC_QUERY_INTF:
  560. if (copy_from_user(header, (u8 *)arg,
  561. sizeof(struct ipa_ioc_query_intf))) {
  562. retval = -EFAULT;
  563. break;
  564. }
  565. if (ipa_query_intf((struct ipa_ioc_query_intf *)header)) {
  566. retval = -1;
  567. break;
  568. }
  569. if (copy_to_user((u8 *)arg, header,
  570. sizeof(struct ipa_ioc_query_intf))) {
  571. retval = -EFAULT;
  572. break;
  573. }
  574. break;
  575. case IPA_IOC_QUERY_INTF_TX_PROPS:
  576. sz = sizeof(struct ipa_ioc_query_intf_tx_props);
  577. if (copy_from_user(header, (u8 *)arg, sz)) {
  578. retval = -EFAULT;
  579. break;
  580. }
  581. if (((struct ipa_ioc_query_intf_tx_props *)
  582. header)->num_tx_props > IPA_NUM_PROPS_MAX) {
  583. retval = -EFAULT;
  584. break;
  585. }
  586. pre_entry =
  587. ((struct ipa_ioc_query_intf_tx_props *)
  588. header)->num_tx_props;
  589. pyld_sz = sz + pre_entry *
  590. sizeof(struct ipa_ioc_tx_intf_prop);
  591. param = kzalloc(pyld_sz, GFP_KERNEL);
  592. if (!param) {
  593. retval = -ENOMEM;
  594. break;
  595. }
  596. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  597. retval = -EFAULT;
  598. break;
  599. }
  600. /* add check in case user-space module compromised */
  601. if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
  602. param)->num_tx_props
  603. != pre_entry)) {
  604. IPAERR("current %d pre %d\n",
  605. ((struct ipa_ioc_query_intf_tx_props *)
  606. param)->num_tx_props, pre_entry);
  607. retval = -EFAULT;
  608. break;
  609. }
  610. if (ipa_query_intf_tx_props(
  611. (struct ipa_ioc_query_intf_tx_props *)param)) {
  612. retval = -1;
  613. break;
  614. }
  615. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  616. retval = -EFAULT;
  617. break;
  618. }
  619. break;
  620. case IPA_IOC_QUERY_INTF_RX_PROPS:
  621. sz = sizeof(struct ipa_ioc_query_intf_rx_props);
  622. if (copy_from_user(header, (u8 *)arg, sz)) {
  623. retval = -EFAULT;
  624. break;
  625. }
  626. if (((struct ipa_ioc_query_intf_rx_props *)
  627. header)->num_rx_props > IPA_NUM_PROPS_MAX) {
  628. retval = -EFAULT;
  629. break;
  630. }
  631. pre_entry =
  632. ((struct ipa_ioc_query_intf_rx_props *)
  633. header)->num_rx_props;
  634. pyld_sz = sz + pre_entry *
  635. sizeof(struct ipa_ioc_rx_intf_prop);
  636. param = kzalloc(pyld_sz, GFP_KERNEL);
  637. if (!param) {
  638. retval = -ENOMEM;
  639. break;
  640. }
  641. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  642. retval = -EFAULT;
  643. break;
  644. }
  645. /* add check in case user-space module compromised */
  646. if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
  647. param)->num_rx_props != pre_entry)) {
  648. IPAERR("current %d pre %d\n",
  649. ((struct ipa_ioc_query_intf_rx_props *)
  650. param)->num_rx_props, pre_entry);
  651. retval = -EFAULT;
  652. break;
  653. }
  654. if (ipa_query_intf_rx_props(
  655. (struct ipa_ioc_query_intf_rx_props *)param)) {
  656. retval = -1;
  657. break;
  658. }
  659. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  660. retval = -EFAULT;
  661. break;
  662. }
  663. break;
  664. case IPA_IOC_PULL_MSG:
  665. if (copy_from_user(header, (u8 *)arg,
  666. sizeof(struct ipa_msg_meta))) {
  667. retval = -EFAULT;
  668. break;
  669. }
  670. pre_entry =
  671. ((struct ipa_msg_meta *)header)->msg_len;
  672. pyld_sz = sizeof(struct ipa_msg_meta) +
  673. pre_entry;
  674. param = kzalloc(pyld_sz, GFP_KERNEL);
  675. if (!param) {
  676. retval = -ENOMEM;
  677. break;
  678. }
  679. if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
  680. retval = -EFAULT;
  681. break;
  682. }
  683. /* add check in case user-space module compromised */
  684. if (unlikely(((struct ipa_msg_meta *)param)->msg_len
  685. != pre_entry)) {
  686. IPAERR("current %d pre %d\n",
  687. ((struct ipa_msg_meta *)param)->msg_len,
  688. pre_entry);
  689. retval = -EFAULT;
  690. break;
  691. }
  692. if (ipa_pull_msg((struct ipa_msg_meta *)param,
  693. (char *)param + sizeof(struct ipa_msg_meta),
  694. ((struct ipa_msg_meta *)param)->msg_len) !=
  695. ((struct ipa_msg_meta *)param)->msg_len) {
  696. retval = -1;
  697. break;
  698. }
  699. if (copy_to_user((u8 *)arg, param, pyld_sz)) {
  700. retval = -EFAULT;
  701. break;
  702. }
  703. break;
  704. case IPA_IOC_RM_ADD_DEPENDENCY:
  705. if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
  706. sizeof(struct ipa_ioc_rm_dependency))) {
  707. retval = -EFAULT;
  708. break;
  709. }
  710. retval = ipa_rm_add_dependency(rm_depend.resource_name,
  711. rm_depend.depends_on_name);
  712. break;
  713. case IPA_IOC_RM_DEL_DEPENDENCY:
  714. if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
  715. sizeof(struct ipa_ioc_rm_dependency))) {
  716. retval = -EFAULT;
  717. break;
  718. }
  719. retval = ipa_rm_delete_dependency(rm_depend.resource_name,
  720. rm_depend.depends_on_name);
  721. break;
  722. default: /* redundant, as cmd was checked against MAXNR */
  723. ipa_dec_client_disable_clks();
  724. return -ENOTTY;
  725. }
  726. kfree(param);
  727. ipa_dec_client_disable_clks();
  728. return retval;
  729. }
  730. /**
  731. * ipa_setup_dflt_rt_tables() - Setup default routing tables
  732. *
  733. * Return codes:
  734. * 0: success
  735. * -ENOMEM: failed to allocate memory
  736. * -EPERM: failed to add the tables
  737. */
  738. int ipa_setup_dflt_rt_tables(void)
  739. {
  740. struct ipa_ioc_add_rt_rule *rt_rule;
  741. struct ipa_rt_rule_add *rt_rule_entry;
  742. rt_rule =
  743. kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
  744. sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
  745. if (!rt_rule) {
  746. IPAERR("fail to alloc mem\n");
  747. return -ENOMEM;
  748. }
  749. /* setup a default v4 route to point to A5 */
  750. rt_rule->num_rules = 1;
  751. rt_rule->commit = 1;
  752. rt_rule->ip = IPA_IP_v4;
  753. strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
  754. IPA_RESOURCE_NAME_MAX);
  755. rt_rule_entry = &rt_rule->rules[0];
  756. rt_rule_entry->at_rear = 1;
  757. rt_rule_entry->rule.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
  758. rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
  759. if (ipa_add_rt_rule(rt_rule)) {
  760. IPAERR("fail to add dflt v4 rule\n");
  761. kfree(rt_rule);
  762. return -EPERM;
  763. }
  764. IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
  765. ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
  766. /* setup a default v6 route to point to A5 */
  767. rt_rule->ip = IPA_IP_v6;
  768. if (ipa_add_rt_rule(rt_rule)) {
  769. IPAERR("fail to add dflt v6 rule\n");
  770. kfree(rt_rule);
  771. return -EPERM;
  772. }
  773. IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
  774. ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
  775. /*
  776. * because these tables are the very first to be added, they will both
  777. * have the same index (0) which is essential for programming the
  778. * "route" end-point config
  779. */
  780. kfree(rt_rule);
  781. return 0;
  782. }
  783. static int ipa_setup_exception_path(void)
  784. {
  785. struct ipa_ioc_add_hdr *hdr;
  786. struct ipa_hdr_add *hdr_entry;
  787. struct ipa_route route = { 0 };
  788. int ret;
  789. /* install the basic exception header */
  790. hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
  791. sizeof(struct ipa_hdr_add), GFP_KERNEL);
  792. if (!hdr) {
  793. IPAERR("fail to alloc exception hdr\n");
  794. return -ENOMEM;
  795. }
  796. hdr->num_hdrs = 1;
  797. hdr->commit = 1;
  798. hdr_entry = &hdr->hdr[0];
  799. strlcpy(hdr_entry->name, IPA_DFLT_HDR_NAME, IPA_RESOURCE_NAME_MAX);
  800. /*
  801. * only single stream for MBIM supported and no exception packets
  802. * expected so set default header to zero
  803. * for IPA HW 1.1 and up the default header length is 8 (exception)
  804. */
  805. if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
  806. hdr_entry->hdr_len = 1;
  807. hdr_entry->hdr[0] = 0;
  808. } else {
  809. hdr_entry->hdr_len = IPA_DEFAULT_HEADER_LENGTH;
  810. }
  811. /*
  812. * SW does not know anything about default exception header so
  813. * we don't set it. IPA HW will use it as a template
  814. */
  815. if (ipa_add_hdr(hdr)) {
  816. IPAERR("fail to add exception hdr\n");
  817. ret = -EPERM;
  818. goto bail;
  819. }
  820. if (hdr_entry->status) {
  821. IPAERR("fail to add exception hdr\n");
  822. ret = -EPERM;
  823. goto bail;
  824. }
  825. ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
  826. /* exception packets goto LAN-WAN pipe from IPA to A5 */
  827. route.route_def_pipe = IPA_A5_LAN_WAN_IN;
  828. route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
  829. if (ipa_cfg_route(&route)) {
  830. IPAERR("fail to add exception hdr\n");
  831. ret = -EPERM;
  832. goto bail;
  833. }
  834. ret = 0;
  835. bail:
  836. kfree(hdr);
  837. return ret;
  838. }
  839. static void ipa_poll_function(struct work_struct *work)
  840. {
  841. int ret;
  842. int tx_pipes[] = { IPA_A5_CMD, IPA_A5_LAN_WAN_OUT,
  843. IPA_A5_WLAN_AMPDU_OUT };
  844. int i;
  845. int num_tx_pipes;
  846. int cnt;
  847. num_tx_pipes = sizeof(tx_pipes) / sizeof(tx_pipes[0]);
  848. if (!IPA_MOBILE_AP_MODE(ipa_ctx->mode))
  849. num_tx_pipes--;
  850. do {
  851. cnt = 0;
  852. /* check all the system pipes for tx comp and rx avail */
  853. if (ipa_ctx->sys[IPA_A5_LAN_WAN_IN].ep->valid)
  854. cnt |= ipa_handle_rx_core(
  855. &ipa_ctx->sys[IPA_A5_LAN_WAN_IN],
  856. false, true);
  857. for (i = 0; i < num_tx_pipes; i++)
  858. if (ipa_ctx->sys[tx_pipes[i]].ep->valid)
  859. cnt |= ipa_handle_tx_core(
  860. &ipa_ctx->sys[tx_pipes[i]],
  861. false, true);
  862. } while (cnt);
  863. /* re-post the poll work */
  864. INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
  865. ret = schedule_delayed_work_on(smp_processor_id(), &ipa_ctx->poll_work,
  866. msecs_to_jiffies(polling_delay_ms));
  867. return;
  868. }
  869. static int ipa_setup_a5_pipes(void)
  870. {
  871. struct ipa_sys_connect_params sys_in;
  872. int result = 0;
  873. /* CMD OUT (A5->IPA) */
  874. memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
  875. sys_in.client = IPA_CLIENT_A5_CMD_PROD;
  876. sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
  877. sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
  878. sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
  879. if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
  880. IPAERR(":setup sys pipe failed.\n");
  881. result = -EPERM;
  882. goto fail_cmd;
  883. }
  884. /* Start polling, only if needed */
  885. if (ipa_ctx->polling_mode) {
  886. INIT_DELAYED_WORK(&ipa_ctx->poll_work, ipa_poll_function);
  887. result =
  888. schedule_delayed_work_on(smp_processor_id(),
  889. &ipa_ctx->poll_work,
  890. msecs_to_jiffies(polling_delay_ms));
  891. if (!result) {
  892. IPAERR(":schedule delayed work failed.\n");
  893. goto fail_schedule_delayed_work;
  894. }
  895. }
  896. if (ipa_setup_exception_path()) {
  897. IPAERR(":fail to setup excp path\n");
  898. result = -EPERM;
  899. goto fail_schedule_delayed_work;
  900. }
  901. if (ipa_ctx->ipa_hw_type != IPA_HW_v1_0) {
  902. if (ipa_setup_dflt_rt_tables()) {
  903. IPAERR(":fail to setup dflt routes\n");
  904. result = -EPERM;
  905. goto fail_schedule_delayed_work;
  906. }
  907. }
  908. /* LAN-WAN IN (IPA->A5) */
  909. memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
  910. sys_in.client = IPA_CLIENT_A5_LAN_WAN_CONS;
  911. sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
  912. sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
  913. sys_in.ipa_ep_cfg.hdr.hdr_len = 8; /* size of A5 exception hdr */
  914. if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
  915. IPAERR(":setup sys pipe failed.\n");
  916. result = -EPERM;
  917. goto fail_schedule_delayed_work;
  918. }
  919. /* LAN-WAN OUT (A5->IPA) */
  920. memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
  921. sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
  922. sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
  923. sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
  924. sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
  925. if (ipa_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
  926. IPAERR(":setup sys pipe failed.\n");
  927. result = -EPERM;
  928. goto fail_data_out;
  929. }
  930. return 0;
  931. fail_data_out:
  932. ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
  933. fail_schedule_delayed_work:
  934. if (ipa_ctx->dflt_v6_rt_rule_hdl)
  935. __ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
  936. if (ipa_ctx->dflt_v4_rt_rule_hdl)
  937. __ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
  938. if (ipa_ctx->excp_hdr_hdl)
  939. __ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false);
  940. ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
  941. fail_cmd:
  942. return result;
  943. }
/*
 * ipa_teardown_a5_pipes() - Undo ipa_setup_a5_pipes() in reverse order:
 * stop the poll worker, tear down the data pipes, remove the default
 * routing rules and exception header, then tear down the command pipe.
 */
static void ipa_teardown_a5_pipes(void)
{
	/* NOTE(review): cancel_delayed_work() does not wait for a running
	 * instance of ipa_poll_function(), which re-arms itself — confirm
	 * whether cancel_delayed_work_sync() is needed here */
	cancel_delayed_work(&ipa_ctx->poll_work);
	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
	__ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
	__ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
	__ipa_del_hdr(ipa_ctx->excp_hdr_hdl, false);
	ipa_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
}
  954. static int ipa_load_pipe_connection(struct platform_device *pdev,
  955. enum a2_mux_pipe_direction pipe_dir,
  956. struct a2_mux_pipe_connection *pdata)
  957. {
  958. struct device_node *node = pdev->dev.of_node;
  959. int rc = 0;
  960. if (!pdata || !pdev)
  961. goto err;
  962. /* retrieve device tree parameters */
  963. for_each_child_of_node(pdev->dev.of_node, node)
  964. {
  965. const char *str;
  966. rc = of_property_read_string(node, "label", &str);
  967. if (rc) {
  968. IPAERR("Cannot read string\n");
  969. goto err;
  970. }
  971. /* Check if connection type is supported */
  972. if (strncmp(str, "a2-to-ipa", 10)
  973. && strncmp(str, "ipa-to-a2", 10))
  974. goto err;
  975. if (strnstr(str, "a2-to-ipa", strnlen("a2-to-ipa", 10))
  976. && IPA_TO_A2 == pipe_dir)
  977. continue; /* skip to the next pipe */
  978. else if (strnstr(str, "ipa-to-a2", strnlen("ipa-to-a2", 10))
  979. && A2_TO_IPA == pipe_dir)
  980. continue; /* skip to the next pipe */
  981. rc = ipa_update_connections_info(node, pdata);
  982. if (rc)
  983. goto err;
  984. }
  985. return 0;
  986. err:
  987. IPAERR("%s: failed\n", __func__);
  988. return rc;
  989. }
  990. static int ipa_update_connections_info(struct device_node *node,
  991. struct a2_mux_pipe_connection *pipe_connection)
  992. {
  993. u32 rc;
  994. char *key;
  995. uint32_t val;
  996. enum ipa_pipe_mem_type mem_type;
  997. if (!pipe_connection || !node)
  998. return -EINVAL;
  999. key = "qcom,src-bam-physical-address";
  1000. rc = of_property_read_u32(node, key, &val);
  1001. if (rc)
  1002. goto err;
  1003. pipe_connection->src_phy_addr = val;
  1004. key = "qcom,ipa-bam-mem-type";
  1005. rc = of_property_read_u32(node, key, &mem_type);
  1006. if (rc)
  1007. goto err;
  1008. pipe_connection->mem_type = mem_type;
  1009. key = "qcom,src-bam-pipe-index";
  1010. rc = of_property_read_u32(node, key, &val);
  1011. if (rc)
  1012. goto err;
  1013. pipe_connection->src_pipe_index = val;
  1014. key = "qcom,dst-bam-physical-address";
  1015. rc = of_property_read_u32(node, key, &val);
  1016. if (rc)
  1017. goto err;
  1018. pipe_connection->dst_phy_addr = val;
  1019. key = "qcom,dst-bam-pipe-index";
  1020. rc = of_property_read_u32(node, key, &val);
  1021. if (rc)
  1022. goto err;
  1023. pipe_connection->dst_pipe_index = val;
  1024. key = "qcom,data-fifo-offset";
  1025. rc = of_property_read_u32(node, key, &val);
  1026. if (rc)
  1027. goto err;
  1028. pipe_connection->data_fifo_base_offset = val;
  1029. key = "qcom,data-fifo-size";
  1030. rc = of_property_read_u32(node, key, &val);
  1031. if (rc)
  1032. goto err;
  1033. pipe_connection->data_fifo_size = val;
  1034. key = "qcom,descriptor-fifo-offset";
  1035. rc = of_property_read_u32(node, key, &val);
  1036. if (rc)
  1037. goto err;
  1038. pipe_connection->desc_fifo_base_offset = val;
  1039. key = "qcom,descriptor-fifo-size";
  1040. rc = of_property_read_u32(node, key, &val);
  1041. if (rc)
  1042. goto err;
  1043. pipe_connection->desc_fifo_size = val;
  1044. return 0;
  1045. err:
  1046. IPAERR("%s: Error in name %s key %s\n", __func__, node->full_name, key);
  1047. return rc;
  1048. }
  1049. /**
  1050. * ipa_get_a2_mux_pipe_info() - Exposes A2 parameters fetched from DTS
  1051. *
  1052. * @pipe_dir: pipe direction
  1053. * @pipe_connect: connect structure containing the parameters fetched from DTS
  1054. *
  1055. * Return codes:
  1056. * 0: success
  1057. * -EFAULT: invalid parameters
  1058. */
  1059. int ipa_get_a2_mux_pipe_info(enum a2_mux_pipe_direction pipe_dir,
  1060. struct a2_mux_pipe_connection *pipe_connect)
  1061. {
  1062. if (!pipe_connect) {
  1063. IPAERR("ipa_get_a2_mux_pipe_info switch null args\n");
  1064. return -EFAULT;
  1065. }
  1066. switch (pipe_dir) {
  1067. case A2_TO_IPA:
  1068. *pipe_connect = ipa_res.a2_to_ipa_pipe;
  1069. break;
  1070. case IPA_TO_A2:
  1071. *pipe_connect = ipa_res.ipa_to_a2_pipe;
  1072. break;
  1073. default:
  1074. IPAERR("ipa_get_a2_mux_pipe_info switch in default\n");
  1075. return -EFAULT;
  1076. }
  1077. return 0;
  1078. }
  1079. /**
  1080. * ipa_get_a2_mux_bam_info() - Exposes A2 parameters fetched from
  1081. * DTS
  1082. *
  1083. * @a2_bam_mem_base: A2 BAM Memory base
  1084. * @a2_bam_mem_size: A2 BAM Memory size
  1085. * @a2_bam_irq: A2 BAM IRQ
  1086. *
  1087. * Return codes:
  1088. * 0: success
  1089. * -EFAULT: invalid parameters
  1090. */
  1091. int ipa_get_a2_mux_bam_info(u32 *a2_bam_mem_base, u32 *a2_bam_mem_size,
  1092. u32 *a2_bam_irq)
  1093. {
  1094. if (!a2_bam_mem_base || !a2_bam_mem_size || !a2_bam_irq) {
  1095. IPAERR("ipa_get_a2_mux_bam_info null args\n");
  1096. return -EFAULT;
  1097. }
  1098. *a2_bam_mem_base = ipa_res.a2_bam_mem_base;
  1099. *a2_bam_mem_size = ipa_res.a2_bam_mem_size;
  1100. *a2_bam_irq = ipa_res.a2_bam_irq;
  1101. return 0;
  1102. }
  1103. static void ipa_set_aggregation_params(void)
  1104. {
  1105. struct ipa_ep_cfg_aggr agg_params;
  1106. struct ipa_ep_cfg_hdr hdr_params;
  1107. u32 producer_hdl = 0;
  1108. u32 consumer_hdl = 0;
  1109. teth_bridge_get_client_handles(&producer_hdl, &consumer_hdl);
  1110. /* configure aggregation on producer */
  1111. memset(&agg_params, 0, sizeof(struct ipa_ep_cfg_aggr));
  1112. agg_params.aggr_en = IPA_ENABLE_AGGR;
  1113. agg_params.aggr = ipa_ctx->aggregation_type;
  1114. agg_params.aggr_byte_limit = ipa_ctx->aggregation_byte_limit;
  1115. agg_params.aggr_time_limit = ipa_ctx->aggregation_time_limit;
  1116. ipa_cfg_ep_aggr(producer_hdl, &agg_params);
  1117. if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
  1118. /* configure header on producer */
  1119. memset(&hdr_params, 0, sizeof(struct ipa_ep_cfg_hdr));
  1120. hdr_params.hdr_len = 1;
  1121. ipa_cfg_ep_hdr(producer_hdl, &hdr_params);
  1122. }
  1123. /* configure deaggregation on consumer */
  1124. memset(&agg_params, 0, sizeof(struct ipa_ep_cfg_aggr));
  1125. agg_params.aggr_en = IPA_ENABLE_DEAGGR;
  1126. agg_params.aggr = ipa_ctx->aggregation_type;
  1127. ipa_cfg_ep_aggr(consumer_hdl, &agg_params);
  1128. }
  1129. /*
  1130. * The following device attributes are for configuring the aggregation
  1131. * attributes when the driver is already running.
  1132. * The attributes are for configuring the aggregation type
  1133. * (MBIM_16/MBIM_32/TLP), the aggregation byte limit and the aggregation
  1134. * time limit.
  1135. */
  1136. static ssize_t ipa_show_aggregation_type(struct device *dev,
  1137. struct device_attribute *attr,
  1138. char *buf)
  1139. {
  1140. ssize_t ret_val;
  1141. char str[IPA_AGGR_MAX_STR_LENGTH];
  1142. if (!buf) {
  1143. IPAERR("buffer for ipa_show_aggregation_type is NULL\n");
  1144. return -EINVAL;
  1145. }
  1146. memset(str, 0, sizeof(str));
  1147. switch (ipa_ctx->aggregation_type) {
  1148. case IPA_MBIM_16:
  1149. strlcpy(str, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16"));
  1150. break;
  1151. case IPA_MBIM_32:
  1152. strlcpy(str, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32"));
  1153. break;
  1154. case IPA_TLP:
  1155. strlcpy(str, "TLP", IPA_AGGR_STR_IN_BYTES("TLP"));
  1156. break;
  1157. default:
  1158. strlcpy(str, "NONE", IPA_AGGR_STR_IN_BYTES("NONE"));
  1159. break;
  1160. }
  1161. ret_val = scnprintf(buf, PAGE_SIZE, "%s\n", str);
  1162. return ret_val;
  1163. }
  1164. static ssize_t ipa_store_aggregation_type(struct device *dev,
  1165. struct device_attribute *attr,
  1166. const char *buf, size_t count)
  1167. {
  1168. char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
  1169. if (!buf) {
  1170. IPAERR("buffer for ipa_store_aggregation_type is NULL\n");
  1171. return -EINVAL;
  1172. }
  1173. strlcpy(str, buf, sizeof(str));
  1174. pstr = strim(str);
  1175. if (!strncmp(pstr, "MBIM_16", IPA_AGGR_STR_IN_BYTES("MBIM_16")))
  1176. ipa_ctx->aggregation_type = IPA_MBIM_16;
  1177. else if (!strncmp(pstr, "MBIM_32", IPA_AGGR_STR_IN_BYTES("MBIM_32")))
  1178. ipa_ctx->aggregation_type = IPA_MBIM_32;
  1179. else if (!strncmp(pstr, "TLP", IPA_AGGR_STR_IN_BYTES("TLP")))
  1180. ipa_ctx->aggregation_type = IPA_TLP;
  1181. else {
  1182. IPAERR("ipa_store_aggregation_type wrong input\n");
  1183. return -EINVAL;
  1184. }
  1185. ipa_set_aggregation_params();
  1186. return count;
  1187. }
/* sysfs attribute "aggregation_type", mode 0600 (owner read/write) */
static DEVICE_ATTR(aggregation_type, S_IWUSR | S_IRUSR,
		   ipa_show_aggregation_type,
		   ipa_store_aggregation_type);
  1191. static ssize_t ipa_show_aggregation_byte_limit(struct device *dev,
  1192. struct device_attribute *attr,
  1193. char *buf)
  1194. {
  1195. ssize_t ret_val;
  1196. if (!buf) {
  1197. IPAERR("buffer for ipa_show_aggregation_byte_limit is NULL\n");
  1198. return -EINVAL;
  1199. }
  1200. ret_val = scnprintf(buf, PAGE_SIZE, "%u\n",
  1201. ipa_ctx->aggregation_byte_limit);
  1202. return ret_val;
  1203. }
  1204. static ssize_t ipa_store_aggregation_byte_limit(struct device *dev,
  1205. struct device_attribute *attr,
  1206. const char *buf, size_t count)
  1207. {
  1208. char str[IPA_AGGR_MAX_STR_LENGTH];
  1209. char *pstr;
  1210. u32 ret = 0;
  1211. if (!buf) {
  1212. IPAERR("buffer for ipa_store_aggregation_byte_limit is NULL\n");
  1213. return -EINVAL;
  1214. }
  1215. strlcpy(str, buf, sizeof(str));
  1216. pstr = strim(str);
  1217. if (kstrtouint(pstr, IPA_AGGR_MAX_STR_LENGTH, &ret)) {
  1218. IPAERR("ipa_store_aggregation_byte_limit wrong input\n");
  1219. return -EINVAL;
  1220. }
  1221. ipa_ctx->aggregation_byte_limit = ret;
  1222. ipa_set_aggregation_params();
  1223. return count;
  1224. }
/* sysfs attribute "aggregation_byte_limit", mode 0600 (owner read/write) */
static DEVICE_ATTR(aggregation_byte_limit, S_IWUSR | S_IRUSR,
		   ipa_show_aggregation_byte_limit,
		   ipa_store_aggregation_byte_limit);
  1228. static ssize_t ipa_show_aggregation_time_limit(struct device *dev,
  1229. struct device_attribute *attr,
  1230. char *buf)
  1231. {
  1232. ssize_t ret_val;
  1233. if (!buf) {
  1234. IPAERR("buffer for ipa_show_aggregation_time_limit is NULL\n");
  1235. return -EINVAL;
  1236. }
  1237. ret_val = scnprintf(buf,
  1238. PAGE_SIZE,
  1239. "%u\n",
  1240. ipa_ctx->aggregation_time_limit);
  1241. return ret_val;
  1242. }
  1243. static ssize_t ipa_store_aggregation_time_limit(struct device *dev,
  1244. struct device_attribute *attr,
  1245. const char *buf, size_t count)
  1246. {
  1247. char str[IPA_AGGR_MAX_STR_LENGTH], *pstr;
  1248. u32 ret = 0;
  1249. if (!buf) {
  1250. IPAERR("buffer for ipa_store_aggregation_time_limit is NULL\n");
  1251. return -EINVAL;
  1252. }
  1253. strlcpy(str, buf, sizeof(str));
  1254. pstr = strim(str);
  1255. if (kstrtouint(pstr, IPA_AGGR_MAX_STR_LENGTH, &ret)) {
  1256. IPAERR("ipa_store_aggregation_time_limit wrong input\n");
  1257. return -EINVAL;
  1258. }
  1259. ipa_ctx->aggregation_time_limit = ret;
  1260. ipa_set_aggregation_params();
  1261. return count;
  1262. }
/* sysfs attribute "aggregation_time_limit", mode 0600 (owner read/write) */
static DEVICE_ATTR(aggregation_time_limit, S_IWUSR | S_IRUSR,
		   ipa_show_aggregation_time_limit,
		   ipa_store_aggregation_time_limit);
/*
 * Character-device operations for the IPA driver node: open, read and
 * the unlocked-ioctl configuration interface.
 */
static const struct file_operations ipa_drv_fops = {
	.owner = THIS_MODULE,
	.open = ipa_open,
	.read = ipa_read,
	.unlocked_ioctl = ipa_ioctl,
};
  1272. static int ipa_get_clks(struct device *dev)
  1273. {
  1274. ipa_cnoc_clk = clk_get(dev, "iface_clk");
  1275. if (IS_ERR(ipa_cnoc_clk)) {
  1276. ipa_cnoc_clk = NULL;
  1277. IPAERR("fail to get cnoc clk\n");
  1278. return -ENODEV;
  1279. }
  1280. ipa_clk_src = clk_get(dev, "core_src_clk");
  1281. if (IS_ERR(ipa_clk_src)) {
  1282. ipa_clk_src = NULL;
  1283. IPAERR("fail to get ipa clk src\n");
  1284. return -ENODEV;
  1285. }
  1286. ipa_clk = clk_get(dev, "core_clk");
  1287. if (IS_ERR(ipa_clk)) {
  1288. ipa_clk = NULL;
  1289. IPAERR("fail to get ipa clk\n");
  1290. return -ENODEV;
  1291. }
  1292. sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
  1293. if (IS_ERR(sys_noc_ipa_axi_clk)) {
  1294. sys_noc_ipa_axi_clk = NULL;
  1295. IPAERR("fail to get sys_noc_ipa_axi clk\n");
  1296. return -ENODEV;
  1297. }
  1298. ipa_inactivity_clk = clk_get(dev, "inactivity_clk");
  1299. if (IS_ERR(ipa_inactivity_clk)) {
  1300. ipa_inactivity_clk = NULL;
  1301. IPAERR("fail to get inactivity clk\n");
  1302. return -ENODEV;
  1303. }
  1304. return 0;
  1305. }
  1306. /**
  1307. * ipa_enable_clks() - Turn on IPA clocks
  1308. *
  1309. * Return codes:
  1310. * None
  1311. */
  1312. void ipa_enable_clks(void)
  1313. {
  1314. if (ipa_cnoc_clk) {
  1315. clk_prepare(ipa_cnoc_clk);
  1316. clk_enable(ipa_cnoc_clk);
  1317. clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
  1318. } else {
  1319. WARN_ON(1);
  1320. }
  1321. if (ipa_clk_src)
  1322. if (ipa_res.ipa_hw_type == IPA_HW_v1_0)
  1323. clk_set_rate(ipa_clk_src, IPA_V1_CLK_RATE);
  1324. else if (ipa_res.ipa_hw_type == IPA_HW_v1_1)
  1325. clk_set_rate(ipa_clk_src, IPA_V1_1_CLK_RATE);
  1326. else
  1327. WARN_ON(1);
  1328. else
  1329. WARN_ON(1);
  1330. if (ipa_clk)
  1331. clk_prepare(ipa_clk);
  1332. else
  1333. WARN_ON(1);
  1334. if (sys_noc_ipa_axi_clk)
  1335. clk_prepare(sys_noc_ipa_axi_clk);
  1336. else
  1337. WARN_ON(1);
  1338. if (ipa_inactivity_clk)
  1339. clk_prepare(ipa_inactivity_clk);
  1340. else
  1341. WARN_ON(1);
  1342. if (ipa_clk)
  1343. clk_enable(ipa_clk);
  1344. else
  1345. WARN_ON(1);
  1346. if (sys_noc_ipa_axi_clk)
  1347. clk_enable(sys_noc_ipa_axi_clk);
  1348. else
  1349. WARN_ON(1);
  1350. if (ipa_inactivity_clk)
  1351. clk_enable(ipa_inactivity_clk);
  1352. else
  1353. WARN_ON(1);
  1354. if (msm_bus_scale_client_update_request(ipa_bus_hdl, 1))
  1355. WARN_ON(1);
  1356. }
  1357. /**
  1358. * ipa_disable_clks() - Turn off IPA clocks
  1359. *
  1360. * Return codes:
  1361. * None
  1362. */
  1363. void ipa_disable_clks(void)
  1364. {
  1365. if (ipa_inactivity_clk)
  1366. clk_disable_unprepare(ipa_inactivity_clk);
  1367. else
  1368. WARN_ON(1);
  1369. if (sys_noc_ipa_axi_clk)
  1370. clk_disable_unprepare(sys_noc_ipa_axi_clk);
  1371. else
  1372. WARN_ON(1);
  1373. if (ipa_clk)
  1374. clk_disable_unprepare(ipa_clk);
  1375. else
  1376. WARN_ON(1);
  1377. if (ipa_cnoc_clk)
  1378. clk_disable_unprepare(ipa_cnoc_clk);
  1379. else
  1380. WARN_ON(1);
  1381. if (msm_bus_scale_client_update_request(ipa_bus_hdl, 0))
  1382. WARN_ON(1);
  1383. }
  1384. /**
  1385. * ipa_inc_client_enable_clks() - Increase active clients counter, and
  1386. * enable ipa clocks if necessary
  1387. *
  1388. * Return codes:
  1389. * None
  1390. */
  1391. void ipa_inc_client_enable_clks(void)
  1392. {
  1393. mutex_lock(&ipa_ctx->ipa_active_clients_lock);
  1394. ipa_ctx->ipa_active_clients++;
  1395. if (ipa_ctx->ipa_active_clients == 1)
  1396. if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
  1397. ipa_enable_clks();
  1398. mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
  1399. }
  1400. /**
  1401. * ipa_dec_client_disable_clks() - Decrease active clients counter, and
  1402. * disable ipa clocks if necessary
  1403. *
  1404. * Return codes:
  1405. * None
  1406. */
  1407. void ipa_dec_client_disable_clks(void)
  1408. {
  1409. mutex_lock(&ipa_ctx->ipa_active_clients_lock);
  1410. ipa_ctx->ipa_active_clients--;
  1411. if (ipa_ctx->ipa_active_clients == 0)
  1412. if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
  1413. ipa_disable_clks();
  1414. mutex_unlock(&ipa_ctx->ipa_active_clients_lock);
  1415. }
  1416. static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
  1417. {
  1418. void *bam_cnfg_bits;
  1419. if ((ipa_ctx->ipa_hw_type == IPA_HW_v1_0) ||
  1420. (ipa_ctx->ipa_hw_type == IPA_HW_v1_1)) {
  1421. bam_cnfg_bits = ioremap(res->ipa_mem_base +
  1422. IPA_BAM_REG_BASE_OFST,
  1423. IPA_BAM_REMAP_SIZE);
  1424. if (!bam_cnfg_bits)
  1425. return -ENOMEM;
  1426. ipa_write_reg(bam_cnfg_bits, IPA_BAM_CNFG_BITS_OFST,
  1427. IPA_BAM_CNFG_BITS_VAL);
  1428. iounmap(bam_cnfg_bits);
  1429. }
  1430. return 0;
  1431. }
/*
 * ipa_init_flt_block() - Install a dummy global filter rule per IP family.
 *
 * Return: 0 on success (or when no workaround is needed), negative errno
 * on allocation or rule-commit failure.
 */
static int ipa_init_flt_block(void)
{
	int result = 0;
	/*
	 * SW workaround for Improper Filter Behaviour when neither Global nor
	 * Pipe Rules are present => configure dummy global filter rule
	 * always which results in a miss
	 */
	struct ipa_ioc_add_flt_rule *rules;
	struct ipa_flt_rule_add *rule;
	struct ipa_ioc_get_rt_tbl rt_lookup;
	enum ipa_ip_type ip;

	/* workaround applies to IPA HW v1.1 only */
	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
		size_t sz = sizeof(struct ipa_ioc_add_flt_rule) +
			sizeof(struct ipa_flt_rule_add);

		rules = kmalloc(sz, GFP_KERNEL);
		if (rules == NULL) {
			IPAERR("fail to alloc mem for dummy filter rule\n");
			return -ENOMEM;
		}

		/* one dummy global rule for each IP family (v4 and v6) */
		for (ip = IPA_IP_v4; ip < IPA_IP_MAX; ip++) {
			/* resolve the default routing table handle */
			memset(&rt_lookup, 0,
					sizeof(struct ipa_ioc_get_rt_tbl));
			rt_lookup.ip = ip;
			strlcpy(rt_lookup.name, IPA_DFLT_RT_TBL_NAME,
					IPA_RESOURCE_NAME_MAX);
			ipa_get_rt_tbl(&rt_lookup);
			/* NOTE(review): presumably releases the reference
			 * taken by the lookup while the handle stays usable
			 * below — confirm refcount semantics */
			ipa_put_rt_tbl(rt_lookup.hdl);
			memset(rules, 0, sz);
			rule = &rules->rules[0];
			rules->commit = 1;
			rules->ip = ip;
			rules->global = 1;
			rules->num_rules = 1;
			rule->at_rear = 1;
			/* match an impossible L4 protocol / next header so
			 * the rule always misses */
			if (ip == IPA_IP_v4) {
				rule->rule.attrib.attrib_mask =
					IPA_FLT_PROTOCOL;
				rule->rule.attrib.u.v4.protocol =
					IPA_INVALID_L4_PROTOCOL;
			} else if (ip == IPA_IP_v6) {
				rule->rule.attrib.attrib_mask =
					IPA_FLT_NEXT_HDR;
				rule->rule.attrib.u.v6.next_hdr =
					IPA_INVALID_L4_PROTOCOL;
			} else {
				result = -EINVAL;
				WARN_ON(1);
				break;
			}
			rule->rule.action = IPA_PASS_TO_ROUTING;
			rule->rule.rt_tbl_hdl = rt_lookup.hdl;
			/* commit; per-rule status is checked as well */
			if (ipa_add_flt_rule(rules) || rules->rules[0].status) {
				result = -EINVAL;
				WARN_ON(1);
				break;
			}
		}
		kfree(rules);
	}
	return result;
}
  1494. /**
  1495. * ipa_init() - Initialize the IPA Driver
  1496. *@resource_p: contain platform specific values from DST file
  1497. *
  1498. * Function initialization process:
  1499. * - Allocate memory for the driver context data struct
  1500. * - Initializing the ipa_ctx with:
  1501. * 1)parsed values from the dts file
  1502. * 2)parameters passed to the module initialization
  1503. * 3)read HW values(such as core memory size)
  1504. * - Map IPA core registers to CPU memory
  1505. * - Restart IPA core(HW reset)
  1506. * - Register IPA BAM to SPS driver and get a BAM handler
  1507. * - Set configuration for IPA BAM via BAM_CNFG_BITS
  1508. * - Initialize the look-aside caches(kmem_cache/slab) for filter,
  1509. * routing and IPA-tree
  1510. * - Create memory pool with 4 objects for DMA operations(each object
  1511. * is 512Bytes long), this object will be use for tx(A5->IPA)
  1512. * - Initialize lists head(routing,filter,hdr,system pipes)
  1513. * - Initialize mutexes (for ipa_ctx and NAT memory mutexes)
  1514. * - Initialize spinlocks (for list related to A5<->IPA pipes)
  1515. * - Initialize 2 single-threaded work-queue named "ipa rx wq" and "ipa tx wq"
  1516. * - Initialize Red-Black-Tree(s) for handles of header,routing rule,
  1517. * routing table ,filtering rule
  1518. * - Setup all A5<->IPA pipes by calling to ipa_setup_a5_pipes
  1519. * - Preparing the descriptors for System pipes
  1520. * - Initialize the filter block by committing IPV4 and IPV6 default rules
  1521. * - Create empty routing table in system memory(no committing)
  1522. * - Initialize pipes memory pool with ipa_pipe_mem_init for supported platforms
  1523. * - Create a char-device for IPA
  1524. * - Initialize IPA RM (resource manager)
  1525. */
  1526. static int ipa_init(const struct ipa_plat_drv_res *resource_p)
  1527. {
  1528. int result = 0;
  1529. int i;
  1530. struct sps_bam_props bam_props = { 0 };
  1531. struct ipa_flt_tbl *flt_tbl;
  1532. struct ipa_rt_tbl_set *rset;
  1533. IPADBG("IPA init\n");
  1534. ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
  1535. if (!ipa_ctx) {
  1536. IPAERR(":kzalloc err.\n");
  1537. result = -ENOMEM;
  1538. goto fail_mem;
  1539. }
  1540. IPADBG("polling_mode=%u delay_ms=%u\n", polling_mode, polling_delay_ms);
  1541. ipa_ctx->polling_mode = polling_mode;
  1542. IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
  1543. hdr_tbl_lcl, ip4_rt_tbl_lcl, ip6_rt_tbl_lcl, ip4_flt_tbl_lcl,
  1544. ip6_flt_tbl_lcl);
  1545. ipa_ctx->hdr_tbl_lcl = hdr_tbl_lcl;
  1546. ipa_ctx->ip4_rt_tbl_lcl = ip4_rt_tbl_lcl;
  1547. ipa_ctx->ip6_rt_tbl_lcl = ip6_rt_tbl_lcl;
  1548. ipa_ctx->ip4_flt_tbl_lcl = ip4_flt_tbl_lcl;
  1549. ipa_ctx->ip6_flt_tbl_lcl = ip6_flt_tbl_lcl;
  1550. ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
  1551. ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
  1552. ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
  1553. /* setup IPA register access */
  1554. ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base + IPA_REG_BASE_OFST,
  1555. resource_p->ipa_mem_size);
  1556. if (!ipa_ctx->mmio) {
  1557. IPAERR(":ipa-base ioremap err.\n");
  1558. result = -EFAULT;
  1559. goto fail_remap;
  1560. }
  1561. /* do POR programming to setup HW */
  1562. result = ipa_init_hw();
  1563. if (result) {
  1564. IPAERR(":error initializing driver.\n");
  1565. result = -ENODEV;
  1566. goto fail_init_hw;
  1567. }
  1568. if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
  1569. /* setup chicken bits */
  1570. result = ipa_set_single_ndp_per_mbim(true);
  1571. if (result) {
  1572. IPAERR(":failed to set single ndp per mbim.\n");
  1573. result = -EFAULT;
  1574. goto fail_init_hw;
  1575. }
  1576. result = ipa_set_hw_timer_fix_for_mbim_aggr(true);
  1577. if (result) {
  1578. IPAERR(":failed to set HW timer fix for MBIM agg.\n");
  1579. result = -EFAULT;
  1580. goto fail_init_hw;
  1581. }
  1582. }
  1583. /* read how much SRAM is available for SW use */
  1584. if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
  1585. ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
  1586. IPA_SHARED_MEM_SIZE_OFST_v1);
  1587. else
  1588. ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
  1589. IPA_SHARED_MEM_SIZE_OFST_v2);
  1590. if (IPA_RAM_END_OFST > ipa_ctx->smem_sz) {
  1591. IPAERR("SW expect more core memory, needed %d, avail %d\n",
  1592. IPA_RAM_END_OFST, ipa_ctx->smem_sz);
  1593. result = -ENOMEM;
  1594. goto fail_init_hw;
  1595. }
  1596. /* register IPA with SPS driver */
  1597. bam_props.phys_addr = resource_p->bam_mem_base;
  1598. bam_props.virt_size = resource_p->bam_mem_size;
  1599. bam_props.irq = resource_p->bam_irq;
  1600. bam_props.num_pipes = IPA_NUM_PIPES;
  1601. bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
  1602. bam_props.event_threshold = IPA_EVENT_THRESHOLD;
  1603. bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
  1604. result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
  1605. if (result) {
  1606. IPAERR(":bam register err.\n");
  1607. result = -ENODEV;
  1608. goto fail_init_hw;
  1609. }
  1610. if (ipa_setup_bam_cfg(resource_p)) {
  1611. IPAERR(":bam cfg err.\n");
  1612. result = -ENODEV;
  1613. goto fail_flt_rule_cache;
  1614. }
  1615. /* set up the default op mode */
  1616. ipa_ctx->mode = IPA_MODE_MOBILE_AP_WAN;
  1617. /* init the lookaside cache */
  1618. ipa_ctx->flt_rule_cache = kmem_cache_create("IPA FLT",
  1619. sizeof(struct ipa_flt_entry), 0, 0, NULL);
  1620. if (!ipa_ctx->flt_rule_cache) {
  1621. IPAERR(":ipa flt cache create failed\n");
  1622. result = -ENOMEM;
  1623. goto fail_flt_rule_cache;
  1624. }
  1625. ipa_ctx->rt_rule_cache = kmem_cache_create("IPA RT",
  1626. sizeof(struct ipa_rt_entry), 0, 0, NULL);
  1627. if (!ipa_ctx->rt_rule_cache) {
  1628. IPAERR(":ipa rt cache create failed\n");
  1629. result = -ENOMEM;
  1630. goto fail_rt_rule_cache;
  1631. }
  1632. ipa_ctx->hdr_cache = kmem_cache_create("IPA HDR",
  1633. sizeof(struct ipa_hdr_entry), 0, 0, NULL);
  1634. if (!ipa_ctx->hdr_cache) {
  1635. IPAERR(":ipa hdr cache create failed\n");
  1636. result = -ENOMEM;
  1637. goto fail_hdr_cache;
  1638. }
  1639. ipa_ctx->hdr_offset_cache =
  1640. kmem_cache_create("IPA HDR OFF", sizeof(struct ipa_hdr_offset_entry),
  1641. 0, 0, NULL);
  1642. if (!ipa_ctx->hdr_offset_cache) {
  1643. IPAERR(":ipa hdr off cache create failed\n");
  1644. result = -ENOMEM;
  1645. goto fail_hdr_offset_cache;
  1646. }
  1647. ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA RT TBL",
  1648. sizeof(struct ipa_rt_tbl), 0, 0, NULL);
  1649. if (!ipa_ctx->rt_tbl_cache) {
  1650. IPAERR(":ipa rt tbl cache create failed\n");
  1651. result = -ENOMEM;
  1652. goto fail_rt_tbl_cache;
  1653. }
  1654. ipa_ctx->tx_pkt_wrapper_cache =
  1655. kmem_cache_create("IPA TX PKT WRAPPER",
  1656. sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
  1657. if (!ipa_ctx->tx_pkt_wrapper_cache) {
  1658. IPAERR(":ipa tx pkt wrapper cache create failed\n");
  1659. result = -ENOMEM;
  1660. goto fail_tx_pkt_wrapper_cache;
  1661. }
  1662. ipa_ctx->rx_pkt_wrapper_cache =
  1663. kmem_cache_create("IPA RX PKT WRAPPER",
  1664. sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
  1665. if (!ipa_ctx->rx_pkt_wrapper_cache) {
  1666. IPAERR(":ipa rx pkt wrapper cache create failed\n");
  1667. result = -ENOMEM;
  1668. goto fail_rx_pkt_wrapper_cache;
  1669. }
  1670. ipa_ctx->tree_node_cache =
  1671. kmem_cache_create("IPA TREE", sizeof(struct ipa_tree_node), 0, 0,
  1672. NULL);
  1673. if (!ipa_ctx->tree_node_cache) {
  1674. IPAERR(":ipa tree node cache create failed\n");
  1675. result = -ENOMEM;
  1676. goto fail_tree_node_cache;
  1677. }
  1678. /*
  1679. * setup DMA pool 4 byte aligned, don't cross 1k boundaries, nominal
  1680. * size 512 bytes
  1681. * This is an issue with IPA HW v1.0 only.
  1682. */
  1683. if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0) {
  1684. ipa_ctx->dma_pool = dma_pool_create("ipa_1k",
  1685. NULL,
  1686. IPA_DMA_POOL_SIZE, IPA_DMA_POOL_ALIGNMENT,
  1687. IPA_DMA_POOL_BOUNDARY);
  1688. } else {
  1689. ipa_ctx->dma_pool = dma_pool_create("ipa_tx", NULL,
  1690. IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
  1691. 0, 0);
  1692. }
  1693. if (!ipa_ctx->dma_pool) {
  1694. IPAERR("cannot alloc DMA pool.\n");
  1695. result = -ENOMEM;
  1696. goto fail_dma_pool;
  1697. }
  1698. ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
  1699. ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
  1700. /* init the various list heads */
  1701. INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
  1702. INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
  1703. INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
  1704. for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
  1705. INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
  1706. INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
  1707. }
  1708. INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
  1709. INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
  1710. for (i = 0; i < IPA_NUM_PIPES; i++) {
  1711. flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
  1712. INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
  1713. flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
  1714. flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
  1715. INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
  1716. flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
  1717. }
  1718. rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
  1719. INIT_LIST_HEAD(&rset->head_rt_tbl_list);
  1720. rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
  1721. INIT_LIST_HEAD(&rset->head_rt_tbl_list);
  1722. INIT_LIST_HEAD(&ipa_ctx->intf_list);
  1723. INIT_LIST_HEAD(&ipa_ctx->msg_list);
  1724. INIT_LIST_HEAD(&ipa_ctx->pull_msg_list);
  1725. init_waitqueue_head(&ipa_ctx->msg_waitq);
  1726. mutex_init(&ipa_ctx->msg_lock);
  1727. mutex_init(&ipa_ctx->lock);
  1728. mutex_init(&ipa_ctx->nat_mem.lock);
  1729. for (i = 0; i < IPA_A5_SYS_MAX; i++) {
  1730. INIT_LIST_HEAD(&ipa_ctx->sys[i].head_desc_list);
  1731. spin_lock_init(&ipa_ctx->sys[i].spinlock);
  1732. if (i != IPA_A5_WLAN_AMPDU_OUT)
  1733. ipa_ctx->sys[i].ep = &ipa_ctx->ep[i];
  1734. else
  1735. ipa_ctx->sys[i].ep = &ipa_ctx->ep[WLAN_AMPDU_TX_EP];
  1736. if (ipa_ctx->polling_mode)
  1737. atomic_set(&ipa_ctx->sys[i].curr_polling_state, 1);
  1738. else
  1739. atomic_set(&ipa_ctx->sys[i].curr_polling_state, 0);
  1740. }
  1741. ipa_ctx->rx_wq = create_singlethread_workqueue("ipa rx wq");
  1742. if (!ipa_ctx->rx_wq) {
  1743. IPAERR(":fail to create rx wq\n");
  1744. result = -ENOMEM;
  1745. goto fail_rx_wq;
  1746. }
  1747. ipa_ctx->tx_wq = alloc_workqueue("ipa tx wq", WQ_MEM_RECLAIM |
  1748. WQ_CPU_INTENSIVE, 1);
  1749. if (!ipa_ctx->tx_wq) {
  1750. IPAERR(":fail to create tx wq\n");
  1751. result = -ENOMEM;
  1752. goto fail_tx_wq;
  1753. }
  1754. ipa_ctx->hdr_hdl_tree = RB_ROOT;
  1755. ipa_ctx->rt_rule_hdl_tree = RB_ROOT;
  1756. ipa_ctx->rt_tbl_hdl_tree = RB_ROOT;
  1757. ipa_ctx->flt_rule_hdl_tree = RB_ROOT;
  1758. mutex_init(&ipa_ctx->ipa_active_clients_lock);
  1759. ipa_ctx->ipa_active_clients = 0;
  1760. result = ipa_bridge_init();
  1761. if (result) {
  1762. IPAERR("ipa bridge init err.\n");
  1763. result = -ENODEV;
  1764. goto fail_a5_pipes;
  1765. }
  1766. /* setup the A5-IPA pipes */
  1767. if (ipa_setup_a5_pipes()) {
  1768. IPAERR(":failed to setup IPA-A5 pipes.\n");
  1769. result = -ENODEV;
  1770. goto fail_a5_pipes;
  1771. }
  1772. ipa_replenish_rx_cache();
  1773. if (ipa_init_flt_block()) {
  1774. IPAERR("fail to setup dummy filter rules\n");
  1775. result = -ENODEV;
  1776. goto fail_empty_rt_tbl;
  1777. }
  1778. /*
  1779. * setup an empty routing table in system memory, this will be used
  1780. * to delete a routing table cleanly and safely
  1781. */
  1782. ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
  1783. ipa_ctx->empty_rt_tbl_mem.base =
  1784. dma_alloc_coherent(NULL, ipa_ctx->empty_rt_tbl_mem.size,
  1785. &ipa_ctx->empty_rt_tbl_mem.phys_base,
  1786. GFP_KERNEL);
  1787. if (!ipa_ctx->empty_rt_tbl_mem.base) {
  1788. IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
  1789. ipa_ctx->empty_rt_tbl_mem.size);
  1790. result = -ENOMEM;
  1791. goto fail_empty_rt_tbl;
  1792. }
  1793. memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
  1794. ipa_ctx->empty_rt_tbl_mem.size);
  1795. /* setup the IPA pipe mem pool */
  1796. ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
  1797. resource_p->ipa_pipe_mem_size);
  1798. ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
  1799. result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
  1800. if (result) {
  1801. IPAERR("alloc_chrdev_region err.\n");
  1802. result = -ENODEV;
  1803. goto fail_alloc_chrdev_region;
  1804. }
  1805. ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
  1806. ipa_ctx, DRV_NAME);
  1807. if (IS_ERR(ipa_ctx->dev)) {
  1808. IPAERR(":device_create err.\n");
  1809. result = -ENODEV;
  1810. goto fail_device_create;
  1811. }
  1812. cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
  1813. ipa_ctx->cdev.owner = THIS_MODULE;
  1814. ipa_ctx->cdev.ops = &ipa_drv_fops; /* from LDD3 */
  1815. result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
  1816. if (result) {
  1817. IPAERR(":cdev_add err=%d\n", -result);
  1818. result = -ENODEV;
  1819. goto fail_cdev_add;
  1820. }
  1821. /* default aggregation parameters */
  1822. ipa_ctx->aggregation_type = IPA_MBIM_16;
  1823. ipa_ctx->aggregation_byte_limit = 1;
  1824. ipa_ctx->aggregation_time_limit = 0;
  1825. if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_PCIE) {
  1826. /* Initialize IPA RM (resource manager) */
  1827. result = ipa_rm_initialize();
  1828. if (result) {
  1829. IPAERR(":cdev_add err=%d\n", -result);
  1830. result = -ENODEV;
  1831. goto fail_ipa_rm_init;
  1832. }
  1833. }
  1834. if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL) {
  1835. a2_mux_init();
  1836. /* Initialize the tethering bridge driver */
  1837. result = teth_bridge_driver_init();
  1838. if (result) {
  1839. IPAERR(":teth_bridge_driver_init() failed\n");
  1840. result = -ENODEV;
  1841. goto fail_cdev_add;
  1842. }
  1843. }
  1844. /* gate IPA clocks */
  1845. if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_NORMAL)
  1846. ipa_disable_clks();
  1847. IPADBG(":IPA driver init OK.\n");
  1848. return 0;
  1849. fail_ipa_rm_init:
  1850. cdev_del(&ipa_ctx->cdev);
  1851. fail_cdev_add:
  1852. device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
  1853. fail_device_create:
  1854. unregister_chrdev_region(ipa_ctx->dev_num, 1);
  1855. fail_alloc_chrdev_region:
  1856. if (ipa_ctx->pipe_mem_pool)
  1857. gen_pool_destroy(ipa_ctx->pipe_mem_pool);
  1858. dma_free_coherent(NULL,
  1859. ipa_ctx->empty_rt_tbl_mem.size,
  1860. ipa_ctx->empty_rt_tbl_mem.base,
  1861. ipa_ctx->empty_rt_tbl_mem.phys_base);
  1862. fail_empty_rt_tbl:
  1863. ipa_cleanup_rx();
  1864. ipa_teardown_a5_pipes();
  1865. fail_a5_pipes:
  1866. destroy_workqueue(ipa_ctx->tx_wq);
  1867. fail_tx_wq:
  1868. destroy_workqueue(ipa_ctx->rx_wq);
  1869. fail_rx_wq:
  1870. /*
  1871. * DMA pool need to be released only for IPA HW v1.0 only.
  1872. */
  1873. if (ipa_ctx->ipa_hw_type == IPA_HW_v1_0)
  1874. dma_pool_destroy(ipa_ctx->dma_pool);
  1875. fail_dma_pool:
  1876. kmem_cache_destroy(ipa_ctx->tree_node_cache);
  1877. fail_tree_node_cache:
  1878. kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
  1879. fail_rx_pkt_wrapper_cache:
  1880. kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
  1881. fail_tx_pkt_wrapper_cache:
  1882. kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
  1883. fail_rt_tbl_cache:
  1884. kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
  1885. fail_hdr_offset_cache:
  1886. kmem_cache_destroy(ipa_ctx->hdr_cache);
  1887. fail_hdr_cache:
  1888. kmem_cache_destroy(ipa_ctx->rt_rule_cache);
  1889. fail_rt_rule_cache:
  1890. kmem_cache_destroy(ipa_ctx->flt_rule_cache);
  1891. fail_flt_rule_cache:
  1892. sps_deregister_bam_device(ipa_ctx->bam_handle);
  1893. fail_init_hw:
  1894. iounmap(ipa_ctx->mmio);
  1895. fail_remap:
  1896. kfree(ipa_ctx);
  1897. ipa_ctx = NULL;
  1898. fail_mem:
  1899. return result;
  1900. }
  1901. static int ipa_plat_drv_probe(struct platform_device *pdev_p)
  1902. {
  1903. int result = 0;
  1904. struct resource *resource_p;
  1905. IPADBG("IPA plat drv probe\n");
  1906. /* initialize ipa_res */
  1907. ipa_res.ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
  1908. ipa_res.ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
  1909. ipa_res.ipa_hw_type = 0;
  1910. ipa_res.ipa_hw_mode = 0;
  1911. result = ipa_load_pipe_connection(pdev_p,
  1912. A2_TO_IPA,
  1913. &ipa_res.a2_to_ipa_pipe);
  1914. if (0 != result)
  1915. IPAERR(":ipa_load_pipe_connection failed!\n");
  1916. result = ipa_load_pipe_connection(pdev_p, IPA_TO_A2,
  1917. &ipa_res.ipa_to_a2_pipe);
  1918. if (0 != result)
  1919. IPAERR(":ipa_load_pipe_connection failed!\n");
  1920. /* Get IPA wrapper address */
  1921. resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
  1922. "ipa-base");
  1923. if (!resource_p) {
  1924. IPAERR(":get resource failed for ipa-base!\n");
  1925. return -ENODEV;
  1926. } else {
  1927. ipa_res.ipa_mem_base = resource_p->start;
  1928. ipa_res.ipa_mem_size = resource_size(resource_p);
  1929. }
  1930. /* Get IPA BAM address */
  1931. resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
  1932. "bam-base");
  1933. if (!resource_p) {
  1934. IPAERR(":get resource failed for bam-base!\n");
  1935. return -ENODEV;
  1936. } else {
  1937. ipa_res.bam_mem_base = resource_p->start;
  1938. ipa_res.bam_mem_size = resource_size(resource_p);
  1939. }
  1940. /* Get IPA A2 BAM address */
  1941. resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
  1942. "a2-bam-base");
  1943. if (!resource_p) {
  1944. IPAERR(":get resource failed for a2-bam-base!\n");
  1945. return -ENODEV;
  1946. } else {
  1947. ipa_res.a2_bam_mem_base = resource_p->start;
  1948. ipa_res.a2_bam_mem_size = resource_size(resource_p);
  1949. }
  1950. /* Get IPA pipe mem start ofst */
  1951. resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_MEM,
  1952. "ipa-pipe-mem");
  1953. if (!resource_p) {
  1954. IPADBG(":get resource failed for ipa-pipe-mem\n");
  1955. } else {
  1956. ipa_res.ipa_pipe_mem_start_ofst = resource_p->start;
  1957. ipa_res.ipa_pipe_mem_size = resource_size(resource_p);
  1958. }
  1959. /* Get IPA IRQ number */
  1960. resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
  1961. "ipa-irq");
  1962. if (!resource_p) {
  1963. IPAERR(":get resource failed for ipa-irq!\n");
  1964. return -ENODEV;
  1965. } else {
  1966. ipa_res.ipa_irq = resource_p->start;
  1967. }
  1968. /* Get IPA BAM IRQ number */
  1969. resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
  1970. "bam-irq");
  1971. if (!resource_p) {
  1972. IPAERR(":get resource failed for bam-irq!\n");
  1973. return -ENODEV;
  1974. } else {
  1975. ipa_res.bam_irq = resource_p->start;
  1976. }
  1977. /* Get IPA A2 BAM IRQ number */
  1978. resource_p = platform_get_resource_byname(pdev_p, IORESOURCE_IRQ,
  1979. "a2-bam-irq");
  1980. if (!resource_p) {
  1981. IPAERR(":get resource failed for a2-bam-irq!\n");
  1982. return -ENODEV;
  1983. } else {
  1984. ipa_res.a2_bam_irq = resource_p->start;
  1985. }
  1986. /* Get IPA HW Version */
  1987. result = of_property_read_u32(pdev_p->dev.of_node, "qcom,ipa-hw-ver",
  1988. &ipa_res.ipa_hw_type);
  1989. if ((result) || (ipa_res.ipa_hw_type == 0)) {
  1990. IPAERR(":get resource failed for ipa-hw-ver!\n");
  1991. return -ENODEV;
  1992. }
  1993. IPADBG(": found ipa_res.ipa_hw_type = %d", ipa_res.ipa_hw_type);
  1994. /* Get IPA HW mode */
  1995. result = of_property_read_u32(pdev_p->dev.of_node, "ipa-hw-mode",
  1996. &ipa_res.ipa_hw_mode);
  1997. if (result)
  1998. IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
  1999. else
  2000. IPADBG(": found ipa_res.ipa_hw_mode = %d", ipa_res.ipa_hw_mode);
  2001. IPADBG(":ipa_mem_base = 0x%x, ipa_mem_size = 0x%x\n",
  2002. ipa_res.ipa_mem_base, ipa_res.ipa_mem_size);
  2003. IPADBG(":bam_mem_base = 0x%x, bam_mem_size = 0x%x\n",
  2004. ipa_res.bam_mem_base, ipa_res.bam_mem_size);
  2005. IPADBG(":pipe_mem_start_ofst = 0x%x, pipe_mem_size = 0x%x\n",
  2006. ipa_res.ipa_pipe_mem_start_ofst, ipa_res.ipa_pipe_mem_size);
  2007. IPADBG(":ipa_irq = %d\n", ipa_res.ipa_irq);
  2008. IPADBG(":bam_irq = %d\n", ipa_res.bam_irq);
  2009. /* stash the IPA dev ptr */
  2010. ipa_dev = &pdev_p->dev;
  2011. if (ipa_res.ipa_hw_mode == IPA_HW_MODE_NORMAL) {
  2012. /* get IPA clocks */
  2013. if (ipa_get_clks(ipa_dev) != 0) {
  2014. IPAERR(":fail to get clk handle's!\n");
  2015. return -ENODEV;
  2016. }
  2017. /* get BUS handle */
  2018. ipa_bus_hdl =
  2019. msm_bus_scale_register_client(&ipa_bus_client_pdata);
  2020. if (!ipa_bus_hdl) {
  2021. IPAERR(":fail to register with bus mgr!\n");
  2022. return -ENODEV;
  2023. }
  2024. /* enable IPA clocks */
  2025. ipa_enable_clks();
  2026. }
  2027. /* Proceed to real initialization */
  2028. result = ipa_init(&ipa_res);
  2029. if (result) {
  2030. IPAERR("ipa_init failed\n");
  2031. /* gate IPA clocks */
  2032. if (ipa_res.ipa_hw_mode == IPA_HW_MODE_NORMAL)
  2033. ipa_disable_clks();
  2034. }
  2035. result = device_create_file(&pdev_p->dev,
  2036. &dev_attr_aggregation_type);
  2037. if (result)
  2038. IPAERR("failed to create device file\n");
  2039. result = device_create_file(&pdev_p->dev,
  2040. &dev_attr_aggregation_byte_limit);
  2041. if (result)
  2042. IPAERR("failed to create device file\n");
  2043. result = device_create_file(&pdev_p->dev,
  2044. &dev_attr_aggregation_time_limit);
  2045. if (result)
  2046. IPAERR("failed to create device file\n");
  2047. return result;
  2048. }
  2049. static struct platform_driver ipa_plat_drv = {
  2050. .probe = ipa_plat_drv_probe,
  2051. .driver = {
  2052. .name = DRV_NAME,
  2053. .owner = THIS_MODULE,
  2054. .of_match_table = ipa_plat_drv_match,
  2055. },
  2056. };
  2057. struct ipa_context *ipa_get_ctx(void)
  2058. {
  2059. return ipa_ctx;
  2060. }
  2061. static int __init ipa_module_init(void)
  2062. {
  2063. int result = 0;
  2064. IPADBG("IPA module init\n");
  2065. /* Register as a platform device driver */
  2066. platform_driver_register(&ipa_plat_drv);
  2067. ipa_debugfs_init();
  2068. return result;
  2069. }
/* Init at subsys level so IPA comes up before drivers that depend on it */
subsys_initcall(ipa_module_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA HW device driver");