a2_service.c

  1. /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. /*
  13. * A2 service component
  14. */
  15. #include <net/ip.h>
  16. #include <linux/delay.h>
  17. #include <linux/netdevice.h>
  18. #include <linux/sched.h>
  19. #include <linux/skbuff.h>
  20. #include <linux/clk.h>
  21. #include <linux/wakelock.h>
  22. #include <mach/sps.h>
  23. #include <mach/msm_smsm.h>
  24. #include <mach/socinfo.h>
  25. #include <mach/ipa.h>
  26. #include "ipa_i.h"
  27. #define A2_NUM_PIPES 6
  28. #define A2_SUMMING_THRESHOLD 4096
  29. #define BUFFER_SIZE 2048
  30. #define NUM_BUFFERS 32
  31. #define BAM_CH_LOCAL_OPEN 0x1
  32. #define BAM_CH_REMOTE_OPEN 0x2
  33. #define BAM_CH_IN_RESET 0x4
  34. #define BAM_MUX_HDR_MAGIC_NO 0x33fc
  35. #define BAM_MUX_HDR_CMD_DATA 0
  36. #define BAM_MUX_HDR_CMD_OPEN 1
  37. #define BAM_MUX_HDR_CMD_CLOSE 2
  38. #define BAM_MUX_HDR_CMD_STATUS 3
  39. #define BAM_MUX_HDR_CMD_OPEN_NO_A2_PC 4
  40. #define LOW_WATERMARK 2
  41. #define HIGH_WATERMARK 4
  42. #define A2_MUX_COMPLETION_TIMEOUT (60*HZ)
  43. #define ENABLE_DISCONNECT_ACK 0x1
  44. #define A2_MUX_PADDING_LENGTH(len) (4 - ((len) & 0x3))
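/*
 * Illustrative note (not part of the original source): A2_MUX_PADDING_LENGTH
 * rounds a payload up to the next 4-byte boundary. It is only applied when
 * (len & 0x3) != 0, so the pad is 1-3 bytes; e.g. for a 61-byte payload,
 * 4 - (61 & 0x3) = 3 pad bytes are appended and recorded in the MUX header's
 * pad_len field.
 */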
  45. struct bam_ch_info {
  46. u32 status;
  47. a2_mux_notify_cb notify_cb;
  48. void *user_data;
  49. spinlock_t lock;
  50. int num_tx_pkts;
  51. int use_wm;
  52. u32 v4_hdr_hdl;
  53. u32 v6_hdr_hdl;
  54. };
  55. struct tx_pkt_info {
  56. struct sk_buff *skb;
  57. char is_cmd;
  58. u32 len;
  59. struct list_head list_node;
  60. unsigned ts_sec;
  61. unsigned long ts_nsec;
  62. };
  63. struct bam_mux_hdr {
  64. u16 magic_num;
  65. u8 reserved;
  66. u8 cmd;
  67. u8 pad_len;
  68. u8 ch_id;
  69. u16 pkt_len;
  70. };
  71. struct a2_mux_context_type {
  72. u32 tethered_prod;
  73. u32 tethered_cons;
  74. u32 embedded_prod;
  75. u32 embedded_cons;
  76. int a2_mux_apps_pc_enabled;
  77. struct work_struct kickoff_ul_wakeup;
  78. struct work_struct kickoff_ul_power_down;
  79. struct work_struct kickoff_ul_request_resource;
  80. struct bam_ch_info bam_ch[A2_MUX_NUM_CHANNELS];
  81. struct list_head bam_tx_pool;
  82. spinlock_t bam_tx_pool_spinlock;
  83. struct workqueue_struct *a2_mux_tx_workqueue;
  84. struct workqueue_struct *a2_mux_rx_workqueue;
  85. int a2_mux_initialized;
  86. bool bam_is_connected;
  87. bool bam_connect_in_progress;
  88. int a2_mux_send_power_vote_on_init_once;
  89. int a2_mux_sw_bridge_is_connected;
  90. bool a2_mux_dl_wakeup;
  91. u32 a2_device_handle;
  92. struct mutex wakeup_lock;
  93. struct completion ul_wakeup_ack_completion;
  94. struct completion bam_connection_completion;
  95. struct completion request_resource_completion;
  96. struct completion dl_wakeup_completion;
  97. rwlock_t ul_wakeup_lock;
  98. int wait_for_ack;
  99. struct wake_lock bam_wakelock;
  100. int a2_pc_disabled;
  101. spinlock_t wakelock_reference_lock;
  102. int wakelock_reference_count;
  103. int a2_pc_disabled_wakelock_skipped;
  104. int disconnect_ack;
  105. struct mutex smsm_cb_lock;
  106. int bam_dmux_uplink_vote;
  107. };
  108. static struct a2_mux_context_type *a2_mux_ctx;
  109. static void handle_a2_mux_cmd(struct sk_buff *rx_skb);
  110. static bool bam_ch_is_open(int index)
  111. {
  112. return a2_mux_ctx->bam_ch[index].status ==
  113. (BAM_CH_LOCAL_OPEN | BAM_CH_REMOTE_OPEN);
  114. }
  115. static bool bam_ch_is_local_open(int index)
  116. {
  117. return a2_mux_ctx->bam_ch[index].status &
  118. BAM_CH_LOCAL_OPEN;
  119. }
  120. static bool bam_ch_is_remote_open(int index)
  121. {
  122. return a2_mux_ctx->bam_ch[index].status &
  123. BAM_CH_REMOTE_OPEN;
  124. }
  125. static bool bam_ch_is_in_reset(int index)
  126. {
  127. return a2_mux_ctx->bam_ch[index].status &
  128. BAM_CH_IN_RESET;
  129. }
  130. static void set_tx_timestamp(struct tx_pkt_info *pkt)
  131. {
  132. unsigned long long t_now;
  133. t_now = sched_clock();
  134. pkt->ts_nsec = do_div(t_now, 1000000000U);
  135. pkt->ts_sec = (unsigned)t_now;
  136. }
  137. static void verify_tx_queue_is_empty(const char *func)
  138. {
  139. unsigned long flags;
  140. struct tx_pkt_info *info;
  141. int reported = 0;
  142. spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
  143. list_for_each_entry(info, &a2_mux_ctx->bam_tx_pool, list_node) {
  144. if (!reported) {
  145. IPADBG("%s: tx pool not empty\n", func);
  146. reported = 1;
  147. }
  148. IPADBG("%s: node=%p ts=%u.%09lu\n", __func__,
  149. &info->list_node, info->ts_sec, info->ts_nsec);
  150. }
  151. spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
  152. }
  153. static void grab_wakelock(void)
  154. {
  155. unsigned long flags;
  156. spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
  157. IPADBG("%s: ref count = %d\n",
  158. __func__,
  159. a2_mux_ctx->wakelock_reference_count);
  160. if (a2_mux_ctx->wakelock_reference_count == 0)
  161. wake_lock(&a2_mux_ctx->bam_wakelock);
  162. ++a2_mux_ctx->wakelock_reference_count;
  163. spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
  164. }
  165. static void release_wakelock(void)
  166. {
  167. unsigned long flags;
  168. spin_lock_irqsave(&a2_mux_ctx->wakelock_reference_lock, flags);
  169. if (a2_mux_ctx->wakelock_reference_count == 0) {
  170. IPAERR("%s: bam_dmux wakelock not locked\n", __func__);
  171. dump_stack();
  172. spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock,
  173. flags);
  174. return;
  175. }
  176. IPADBG("%s: ref count = %d\n",
  177. __func__,
  178. a2_mux_ctx->wakelock_reference_count);
  179. --a2_mux_ctx->wakelock_reference_count;
  180. if (a2_mux_ctx->wakelock_reference_count == 0)
  181. wake_unlock(&a2_mux_ctx->bam_wakelock);
  182. spin_unlock_irqrestore(&a2_mux_ctx->wakelock_reference_lock, flags);
  183. }
  184. static void toggle_apps_ack(void)
  185. {
  186. static unsigned int clear_bit; /* 0 = set the bit, else clear bit */
  187. IPADBG("%s: apps ack %d->%d\n", __func__,
  188. clear_bit & 0x1, ~clear_bit & 0x1);
  189. smsm_change_state(SMSM_APPS_STATE,
  190. clear_bit & SMSM_A2_POWER_CONTROL_ACK,
  191. ~clear_bit & SMSM_A2_POWER_CONTROL_ACK);
  192. IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_apps_acks);
  193. clear_bit = ~clear_bit;
  194. }
  195. static void power_vote(int vote)
  196. {
  197. IPADBG("%s: curr=%d, vote=%d\n",
  198. __func__,
  199. a2_mux_ctx->bam_dmux_uplink_vote, vote);
  200. if (a2_mux_ctx->bam_dmux_uplink_vote == vote)
  201. IPADBG("%s: warning - duplicate power vote\n", __func__);
  202. a2_mux_ctx->bam_dmux_uplink_vote = vote;
  203. if (vote) {
  204. smsm_change_state(SMSM_APPS_STATE, 0, SMSM_A2_POWER_CONTROL);
  205. IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_out);
  206. } else {
  207. smsm_change_state(SMSM_APPS_STATE, SMSM_A2_POWER_CONTROL, 0);
  208. IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_out);
  209. }
  210. }
  211. static inline void ul_powerdown(void)
  212. {
  213. IPADBG("%s: powerdown\n", __func__);
  214. verify_tx_queue_is_empty(__func__);
  215. if (a2_mux_ctx->a2_pc_disabled)
  216. release_wakelock();
  217. else {
  218. a2_mux_ctx->wait_for_ack = 1;
  219. INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
  220. power_vote(0);
  221. }
  222. }
  223. static void ul_wakeup(void)
  224. {
  225. int ret;
  226. mutex_lock(&a2_mux_ctx->wakeup_lock);
  227. if (a2_mux_ctx->bam_is_connected &&
  228. !a2_mux_ctx->bam_connect_in_progress) {
  229. IPADBG("%s Already awake\n", __func__);
  230. mutex_unlock(&a2_mux_ctx->wakeup_lock);
  231. return;
  232. }
  233. if (a2_mux_ctx->a2_pc_disabled) {
  234. /*
  235. * don't grab the wakelock the first time because it is
  236. * already grabbed when a2 powers on
  237. */
  238. if (likely(a2_mux_ctx->a2_pc_disabled_wakelock_skipped))
  239. grab_wakelock();
  240. else
  241. a2_mux_ctx->a2_pc_disabled_wakelock_skipped = 1;
  242. mutex_unlock(&a2_mux_ctx->wakeup_lock);
  243. return;
  244. }
  245. /*
  246. * must wait for the previous power down request to have been acked
  247. * chances are it already came in and this will just fall through
  248. * instead of waiting
  249. */
  250. if (a2_mux_ctx->wait_for_ack) {
  251. IPADBG("%s waiting for previous ack\n", __func__);
  252. ret = wait_for_completion_timeout(
  253. &a2_mux_ctx->ul_wakeup_ack_completion,
  254. A2_MUX_COMPLETION_TIMEOUT);
  255. a2_mux_ctx->wait_for_ack = 0;
  256. if (unlikely(ret == 0)) {
  257. IPAERR("%s previous ack from modem timed out\n",
  258. __func__);
  259. goto bail;
  260. }
  261. }
  262. INIT_COMPLETION(a2_mux_ctx->ul_wakeup_ack_completion);
  263. power_vote(1);
  264. IPADBG("%s waiting for wakeup ack\n", __func__);
  265. ret = wait_for_completion_timeout(&a2_mux_ctx->ul_wakeup_ack_completion,
  266. A2_MUX_COMPLETION_TIMEOUT);
  267. if (unlikely(ret == 0)) {
  268. IPAERR("%s wakup ack from modem timed out\n", __func__);
  269. goto bail;
  270. }
  271. INIT_COMPLETION(a2_mux_ctx->bam_connection_completion);
  272. if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
  273. ret = wait_for_completion_timeout(
  274. &a2_mux_ctx->bam_connection_completion,
  275. A2_MUX_COMPLETION_TIMEOUT);
  276. if (unlikely(ret == 0)) {
  277. IPAERR("%s modem power on timed out\n", __func__);
  278. goto bail;
  279. }
  280. }
  281. IPADBG("%s complete\n", __func__);
  282. mutex_unlock(&a2_mux_ctx->wakeup_lock);
  283. return;
  284. bail:
  285. mutex_unlock(&a2_mux_ctx->wakeup_lock);
  286. BUG();
  287. return;
  288. }
  289. static void a2_mux_write_done(bool is_tethered, struct sk_buff *skb)
  290. {
  291. struct tx_pkt_info *info;
  292. enum a2_mux_logical_channel_id lcid;
  293. unsigned long event_data;
  294. unsigned long flags;
  295. spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
  296. info = list_first_entry(&a2_mux_ctx->bam_tx_pool,
  297. struct tx_pkt_info, list_node);
  298. if (unlikely(info->skb != skb)) {
  299. struct tx_pkt_info *errant_pkt;
  300. IPAERR("tx_pool mismatch next=%p list_node=%p, ts=%u.%09lu\n",
  301. a2_mux_ctx->bam_tx_pool.next,
  302. &info->list_node,
  303. info->ts_sec, info->ts_nsec
  304. );
  305. list_for_each_entry(errant_pkt,
  306. &a2_mux_ctx->bam_tx_pool, list_node) {
  307. IPAERR("%s: node=%p ts=%u.%09lu\n", __func__,
  308. &errant_pkt->list_node, errant_pkt->ts_sec,
  309. errant_pkt->ts_nsec);
  310. if (errant_pkt->skb == skb)
  311. info = errant_pkt;
  312. }
  313. spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
  314. flags);
  315. BUG();
  316. }
  317. list_del(&info->list_node);
  318. spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
  319. if (info->is_cmd) {
  320. dev_kfree_skb_any(info->skb);
  321. kfree(info);
  322. return;
  323. }
  324. skb = info->skb;
  325. kfree(info);
  326. event_data = (unsigned long)(skb);
  327. if (is_tethered)
  328. lcid = A2_MUX_TETHERED_0;
  329. else {
  330. struct bam_mux_hdr *hdr = (struct bam_mux_hdr *)skb->data;
  331. lcid = (enum a2_mux_logical_channel_id) hdr->ch_id;
  332. }
  333. spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  334. a2_mux_ctx->bam_ch[lcid].num_tx_pkts--;
  335. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  336. if (a2_mux_ctx->bam_ch[lcid].notify_cb)
  337. a2_mux_ctx->bam_ch[lcid].notify_cb(
  338. a2_mux_ctx->bam_ch[lcid].user_data, A2_MUX_WRITE_DONE,
  339. event_data);
  340. else
  341. dev_kfree_skb_any(skb);
  342. }
  343. static bool a2_mux_kickoff_ul_power_down(void)
  344. {
  345. bool is_connected;
  346. write_lock(&a2_mux_ctx->ul_wakeup_lock);
  347. if (a2_mux_ctx->bam_connect_in_progress) {
  348. a2_mux_ctx->bam_is_connected = false;
  349. is_connected = true;
  350. } else {
  351. is_connected = a2_mux_ctx->bam_is_connected;
  352. a2_mux_ctx->bam_is_connected = false;
  353. if (is_connected) {
  354. a2_mux_ctx->bam_connect_in_progress = true;
  355. queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
  356. &a2_mux_ctx->kickoff_ul_power_down);
  357. }
  358. }
  359. write_unlock(&a2_mux_ctx->ul_wakeup_lock);
  360. return is_connected;
  361. }
  362. static bool a2_mux_kickoff_ul_wakeup(void)
  363. {
  364. bool is_connected;
  365. write_lock(&a2_mux_ctx->ul_wakeup_lock);
  366. if (a2_mux_ctx->bam_connect_in_progress) {
  367. a2_mux_ctx->bam_is_connected = true;
  368. is_connected = false;
  369. } else {
  370. is_connected = a2_mux_ctx->bam_is_connected;
  371. a2_mux_ctx->bam_is_connected = true;
  372. if (!is_connected) {
  373. a2_mux_ctx->bam_connect_in_progress = true;
  374. queue_work(a2_mux_ctx->a2_mux_tx_workqueue,
  375. &a2_mux_ctx->kickoff_ul_wakeup);
  376. }
  377. }
  378. write_unlock(&a2_mux_ctx->ul_wakeup_lock);
  379. return is_connected;
  380. }
  381. static void kickoff_ul_power_down_func(struct work_struct *work)
  382. {
  383. bool is_connected;
  384. IPADBG("%s: UL active - forcing powerdown\n", __func__);
  385. ul_powerdown();
  386. write_lock(&a2_mux_ctx->ul_wakeup_lock);
  387. is_connected = a2_mux_ctx->bam_is_connected;
  388. a2_mux_ctx->bam_is_connected = false;
  389. a2_mux_ctx->bam_connect_in_progress = false;
  390. write_unlock(&a2_mux_ctx->ul_wakeup_lock);
  391. if (is_connected)
  392. a2_mux_kickoff_ul_wakeup();
  393. else
  394. ipa_rm_notify_completion(IPA_RM_RESOURCE_RELEASED,
  395. IPA_RM_RESOURCE_A2_CONS);
  396. }
  397. static void kickoff_ul_wakeup_func(struct work_struct *work)
  398. {
  399. bool is_connected;
  400. int ret;
  401. ul_wakeup();
  402. write_lock(&a2_mux_ctx->ul_wakeup_lock);
  403. is_connected = a2_mux_ctx->bam_is_connected;
  404. a2_mux_ctx->bam_is_connected = true;
  405. a2_mux_ctx->bam_connect_in_progress = false;
  406. write_unlock(&a2_mux_ctx->ul_wakeup_lock);
  407. if (is_connected)
  408. ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
  409. IPA_RM_RESOURCE_A2_CONS);
  410. INIT_COMPLETION(a2_mux_ctx->dl_wakeup_completion);
  411. if (!a2_mux_ctx->a2_mux_dl_wakeup) {
  412. ret = wait_for_completion_timeout(
  413. &a2_mux_ctx->dl_wakeup_completion,
  414. A2_MUX_COMPLETION_TIMEOUT);
  415. if (unlikely(ret == 0)) {
  416. IPAERR("%s timeout waiting for A2 PROD granted\n",
  417. __func__);
  418. BUG();
  419. return;
  420. }
  421. }
  422. if (!is_connected)
  423. a2_mux_kickoff_ul_power_down();
  424. }
  425. static void kickoff_ul_request_resource_func(struct work_struct *work)
  426. {
  427. int ret;
  428. INIT_COMPLETION(a2_mux_ctx->request_resource_completion);
  429. ret = ipa_rm_request_resource(IPA_RM_RESOURCE_A2_PROD);
  430. if (ret < 0 && ret != -EINPROGRESS) {
  431. IPAERR("%s: ipa_rm_request_resource failed %d\n", __func__,
  432. ret);
  433. return;
  434. }
  435. if (ret == -EINPROGRESS) {
  436. ret = wait_for_completion_timeout(
  437. &a2_mux_ctx->request_resource_completion,
  438. A2_MUX_COMPLETION_TIMEOUT);
  439. if (unlikely(ret == 0)) {
  440. IPAERR("%s timeout waiting for A2 PROD granted\n",
  441. __func__);
  442. BUG();
  443. return;
  444. }
  445. }
  446. toggle_apps_ack();
  447. a2_mux_ctx->a2_mux_dl_wakeup = true;
  448. complete_all(&a2_mux_ctx->dl_wakeup_completion);
  449. }
  450. static void ipa_embedded_notify(void *priv,
  451. enum ipa_dp_evt_type evt,
  452. unsigned long data)
  453. {
  454. switch (evt) {
  455. case IPA_RECEIVE:
  456. handle_a2_mux_cmd((struct sk_buff *)data);
  457. break;
  458. case IPA_WRITE_DONE:
  459. a2_mux_write_done(false, (struct sk_buff *)data);
  460. break;
  461. default:
  462. IPAERR("%s: Unknown event %d\n", __func__, evt);
  463. break;
  464. }
  465. }
  466. static void ipa_tethered_notify(void *priv,
  467. enum ipa_dp_evt_type evt,
  468. unsigned long data)
  469. {
  470. IPADBG("%s: event = %d\n", __func__, evt);
  471. switch (evt) {
  472. case IPA_RECEIVE:
  473. if (a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb)
  474. a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].notify_cb(
  475. a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].user_data,
  476. A2_MUX_RECEIVE,
  477. data);
  478. break;
  479. case IPA_WRITE_DONE:
  480. a2_mux_write_done(true, (struct sk_buff *)data);
  481. break;
  482. default:
  483. IPAERR("%s: Unknown event %d\n", __func__, evt);
  484. break;
  485. }
  486. }
  487. static int connect_to_bam(void)
  488. {
  489. int ret;
  490. struct ipa_sys_connect_params connect_params;
  491. IPAERR("%s:\n", __func__);
  492. if (a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
  493. IPAERR("%s: SW bridge is already UP\n",
  494. __func__);
  495. return -EFAULT;
  496. }
  497. if (sps_ctrl_bam_dma_clk(true))
  498. WARN_ON(1);
  499. memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
  500. connect_params.client = IPA_CLIENT_A2_TETHERED_CONS;
  501. connect_params.notify = ipa_tethered_notify;
  502. connect_params.desc_fifo_sz = 0x800;
  503. ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
  504. &connect_params,
  505. &a2_mux_ctx->tethered_prod);
  506. if (ret) {
  507. IPAERR("%s: IPA bridge tethered UL failed to connect: %d\n",
  508. __func__, ret);
  509. goto bridge_tethered_ul_failed;
  510. }
  511. memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
  512. connect_params.ipa_ep_cfg.mode.mode = IPA_DMA;
  513. connect_params.ipa_ep_cfg.mode.dst = IPA_CLIENT_USB_CONS;
  514. connect_params.client = IPA_CLIENT_A2_TETHERED_PROD;
  515. connect_params.notify = ipa_tethered_notify;
  516. connect_params.desc_fifo_sz = 0x800;
  517. ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
  518. &connect_params,
  519. &a2_mux_ctx->tethered_cons);
  520. if (ret) {
  521. IPAERR("%s: IPA bridge tethered DL failed to connect: %d\n",
  522. __func__, ret);
  523. goto bridge_tethered_dl_failed;
  524. }
  525. memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
  526. connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
  527. connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
  528. connect_params.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 6;
  529. connect_params.client = IPA_CLIENT_A2_EMBEDDED_CONS;
  530. connect_params.notify = ipa_embedded_notify;
  531. connect_params.desc_fifo_sz = 0x800;
  532. ret = ipa_bridge_setup(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
  533. &connect_params,
  534. &a2_mux_ctx->embedded_prod);
  535. if (ret) {
  536. IPAERR("%s: IPA bridge embedded UL failed to connect: %d\n",
  537. __func__, ret);
  538. goto bridge_embedded_ul_failed;
  539. }
  540. memset(&connect_params, 0, sizeof(struct ipa_sys_connect_params));
  541. connect_params.ipa_ep_cfg.hdr.hdr_len = sizeof(struct bam_mux_hdr);
  542. connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
  543. connect_params.ipa_ep_cfg.hdr.hdr_ofst_metadata = 4;
  544. connect_params.client = IPA_CLIENT_A2_EMBEDDED_PROD;
  545. connect_params.notify = ipa_embedded_notify;
  546. connect_params.desc_fifo_sz = 0x800;
  547. ret = ipa_bridge_setup(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
  548. &connect_params,
  549. &a2_mux_ctx->embedded_cons);
  550. if (ret) {
  551. IPAERR("%s: IPA bridge embedded DL failed to connect: %d\n",
  552. __func__, ret);
  553. goto bridge_embedded_dl_failed;
  554. }
  555. a2_mux_ctx->a2_mux_sw_bridge_is_connected = 1;
  556. complete_all(&a2_mux_ctx->bam_connection_completion);
  557. return 0;
  558. bridge_embedded_dl_failed:
  559. ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
  560. a2_mux_ctx->embedded_prod);
  561. bridge_embedded_ul_failed:
  562. ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
  563. a2_mux_ctx->tethered_cons);
  564. bridge_tethered_dl_failed:
  565. ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
  566. a2_mux_ctx->tethered_prod);
  567. bridge_tethered_ul_failed:
  568. if (sps_ctrl_bam_dma_clk(false))
  569. WARN_ON(1);
  570. return ret;
  571. }
  572. static int disconnect_to_bam(void)
  573. {
  574. int ret;
  575. IPAERR("%s\n", __func__);
  576. if (!a2_mux_ctx->a2_mux_sw_bridge_is_connected) {
  577. IPAERR("%s: SW bridge is already DOWN\n",
  578. __func__);
  579. return -EFAULT;
  580. }
  581. ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_TETHERED,
  582. a2_mux_ctx->tethered_prod);
  583. if (ret) {
  584. IPAERR("%s: IPA bridge tethered UL failed to disconnect: %d\n",
  585. __func__, ret);
  586. return ret;
  587. }
  588. ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_TETHERED,
  589. a2_mux_ctx->tethered_cons);
  590. if (ret) {
  591. IPAERR("%s: IPA bridge tethered DL failed to disconnect: %d\n",
  592. __func__, ret);
  593. return ret;
  594. }
  595. ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_UL, IPA_BRIDGE_TYPE_EMBEDDED,
  596. a2_mux_ctx->embedded_prod);
  597. if (ret) {
  598. IPAERR("%s: IPA bridge embedded UL failed to disconnect: %d\n",
  599. __func__, ret);
  600. return ret;
  601. }
  602. ret = ipa_bridge_teardown(IPA_BRIDGE_DIR_DL, IPA_BRIDGE_TYPE_EMBEDDED,
  603. a2_mux_ctx->embedded_cons);
  604. if (ret) {
  605. IPAERR("%s: IPA bridge embedded DL failed to disconnect: %d\n",
  606. __func__, ret);
  607. return ret;
  608. }
  609. if (sps_ctrl_bam_dma_clk(false))
  610. WARN_ON(1);
  611. verify_tx_queue_is_empty(__func__);
  612. (void) ipa_rm_release_resource(IPA_RM_RESOURCE_A2_PROD);
  613. if (a2_mux_ctx->disconnect_ack)
  614. toggle_apps_ack();
  615. a2_mux_ctx->a2_mux_dl_wakeup = false;
  616. a2_mux_ctx->a2_mux_sw_bridge_is_connected = 0;
  617. complete_all(&a2_mux_ctx->bam_connection_completion);
  618. return 0;
  619. }
  620. static void a2_mux_smsm_cb(void *priv,
  621. u32 old_state,
  622. u32 new_state)
  623. {
  624. static int last_processed_state;
  625. mutex_lock(&a2_mux_ctx->smsm_cb_lock);
  626. IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
  627. new_state);
  628. if (last_processed_state == (new_state & SMSM_A2_POWER_CONTROL)) {
  629. IPADBG("%s: already processed this state\n", __func__);
  630. mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
  631. return;
  632. }
  633. last_processed_state = new_state & SMSM_A2_POWER_CONTROL;
  634. if (new_state & SMSM_A2_POWER_CONTROL) {
  635. IPADBG("%s: MODEM PWR CTRL 1\n", __func__);
  636. IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_on_reqs_in);
  637. grab_wakelock();
  638. (void) connect_to_bam();
  639. queue_work(a2_mux_ctx->a2_mux_rx_workqueue,
  640. &a2_mux_ctx->kickoff_ul_request_resource);
  641. } else if (!(new_state & SMSM_A2_POWER_CONTROL)) {
  642. IPADBG("%s: MODEM PWR CTRL 0\n", __func__);
  643. IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_off_reqs_in);
  644. (void) disconnect_to_bam();
  645. release_wakelock();
  646. } else {
  647. IPAERR("%s: unsupported state change\n", __func__);
  648. }
  649. mutex_unlock(&a2_mux_ctx->smsm_cb_lock);
  650. }
  651. static void a2_mux_smsm_ack_cb(void *priv, u32 old_state,
  652. u32 new_state)
  653. {
  654. IPADBG("%s: 0x%08x -> 0x%08x\n", __func__, old_state,
  655. new_state);
  656. IPA_STATS_INC_CNT(ipa_ctx->stats.a2_power_modem_acks);
  657. complete_all(&a2_mux_ctx->ul_wakeup_ack_completion);
  658. }
  659. static int a2_mux_pm_rm_request_resource(void)
  660. {
  661. int result = 0;
  662. bool is_connected;
  663. is_connected = a2_mux_kickoff_ul_wakeup();
  664. if (!is_connected)
  665. result = -EINPROGRESS;
  666. return result;
  667. }
  668. static int a2_mux_pm_rm_release_resource(void)
  669. {
  670. int result = 0;
  671. bool is_connected;
  672. is_connected = a2_mux_kickoff_ul_power_down();
  673. if (is_connected)
  674. result = -EINPROGRESS;
  675. return result;
  676. }
  677. static void a2_mux_pm_rm_notify_cb(void *user_data,
  678. enum ipa_rm_event event,
  679. unsigned long data)
  680. {
  681. switch (event) {
  682. case IPA_RM_RESOURCE_GRANTED:
  683. IPADBG("%s: PROD GRANTED CB\n", __func__);
  684. complete_all(&a2_mux_ctx->request_resource_completion);
  685. break;
  686. case IPA_RM_RESOURCE_RELEASED:
  687. IPADBG("%s: PROD RELEASED CB\n", __func__);
  688. break;
  689. default:
  690. return;
  691. }
  692. }
  693. static int a2_mux_pm_initialize_rm(void)
  694. {
  695. struct ipa_rm_create_params create_params;
  696. int result;
  697. memset(&create_params, 0, sizeof(create_params));
  698. create_params.name = IPA_RM_RESOURCE_A2_PROD;
  699. create_params.reg_params.notify_cb = &a2_mux_pm_rm_notify_cb;
  700. result = ipa_rm_create_resource(&create_params);
  701. if (result)
  702. goto bail;
  703. memset(&create_params, 0, sizeof(create_params));
  704. create_params.name = IPA_RM_RESOURCE_A2_CONS;
  705. create_params.release_resource = &a2_mux_pm_rm_release_resource;
  706. create_params.request_resource = &a2_mux_pm_rm_request_resource;
  707. result = ipa_rm_create_resource(&create_params);
  708. bail:
  709. return result;
  710. }
  711. static void a2_mux_process_data(struct sk_buff *rx_skb)
  712. {
  713. unsigned long flags;
  714. struct bam_mux_hdr *rx_hdr;
  715. unsigned long event_data;
  716. rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
  717. rx_skb->data = (unsigned char *)(rx_hdr + 1);
  718. rx_skb->tail = rx_skb->data + rx_hdr->pkt_len;
  719. rx_skb->len = rx_hdr->pkt_len;
  720. rx_skb->truesize = rx_hdr->pkt_len + sizeof(struct sk_buff);
  721. event_data = (unsigned long)(rx_skb);
  722. spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
  723. if (a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb)
  724. a2_mux_ctx->bam_ch[rx_hdr->ch_id].notify_cb(
  725. a2_mux_ctx->bam_ch[rx_hdr->ch_id].user_data,
  726. A2_MUX_RECEIVE,
  727. event_data);
  728. else
  729. dev_kfree_skb_any(rx_skb);
  730. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
  731. flags);
  732. }
  733. static void handle_a2_mux_cmd_open(struct bam_mux_hdr *rx_hdr)
  734. {
  735. unsigned long flags;
  736. spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
  737. a2_mux_ctx->bam_ch[rx_hdr->ch_id].status |= BAM_CH_REMOTE_OPEN;
  738. a2_mux_ctx->bam_ch[rx_hdr->ch_id].num_tx_pkts = 0;
  739. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
  740. flags);
  741. }
  742. static void handle_a2_mux_cmd(struct sk_buff *rx_skb)
  743. {
  744. unsigned long flags;
  745. struct bam_mux_hdr *rx_hdr;
  746. rx_hdr = (struct bam_mux_hdr *)rx_skb->data;
  747. IPADBG("%s: magic %x reserved %d cmd %d pad %d ch %d len %d\n",
  748. __func__,
  749. rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
  750. rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
  751. rx_hdr->magic_num = ntohs(rx_hdr->magic_num);
  752. rx_hdr->pkt_len = ntohs(rx_hdr->pkt_len);
  753. IPADBG("%s: converted to host order magic_num=%d, pkt_len=%d\n",
  754. __func__, rx_hdr->magic_num, rx_hdr->pkt_len);
  755. if (rx_hdr->magic_num != BAM_MUX_HDR_MAGIC_NO) {
  756. IPAERR("bad hdr magic %x rvd %d cmd %d pad %d ch %d len %d\n",
  757. rx_hdr->magic_num, rx_hdr->reserved, rx_hdr->cmd,
  758. rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
  759. dev_kfree_skb_any(rx_skb);
  760. return;
  761. }
  762. if (rx_hdr->ch_id >= A2_MUX_NUM_CHANNELS) {
  763. IPAERR("bad LCID %d rsvd %d cmd %d pad %d ch %d len %d\n",
  764. rx_hdr->ch_id, rx_hdr->reserved, rx_hdr->cmd,
  765. rx_hdr->pad_len, rx_hdr->ch_id, rx_hdr->pkt_len);
  766. dev_kfree_skb_any(rx_skb);
  767. return;
  768. }
  769. switch (rx_hdr->cmd) {
  770. case BAM_MUX_HDR_CMD_DATA:
  771. a2_mux_process_data(rx_skb);
  772. break;
  773. case BAM_MUX_HDR_CMD_OPEN:
  774. IPADBG("%s: opening cid %d PC enabled\n", __func__,
  775. rx_hdr->ch_id);
  776. handle_a2_mux_cmd_open(rx_hdr);
  777. if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
  778. IPADBG("%s: deactivating disconnect ack\n",
  779. __func__);
  780. a2_mux_ctx->disconnect_ack = 0;
  781. }
  782. dev_kfree_skb_any(rx_skb);
  783. if (a2_mux_ctx->a2_mux_send_power_vote_on_init_once) {
  784. kickoff_ul_wakeup_func(NULL);
  785. a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 0;
  786. }
  787. break;
  788. case BAM_MUX_HDR_CMD_OPEN_NO_A2_PC:
  789. IPADBG("%s: opening cid %d PC disabled\n", __func__,
  790. rx_hdr->ch_id);
  791. if (!a2_mux_ctx->a2_pc_disabled) {
  792. a2_mux_ctx->a2_pc_disabled = 1;
  793. ul_wakeup();
  794. }
  795. handle_a2_mux_cmd_open(rx_hdr);
  796. dev_kfree_skb_any(rx_skb);
  797. break;
  798. case BAM_MUX_HDR_CMD_CLOSE:
  799. /* probably should drop pending write */
  800. IPADBG("%s: closing cid %d\n", __func__,
  801. rx_hdr->ch_id);
  802. spin_lock_irqsave(&a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock,
  803. flags);
  804. a2_mux_ctx->bam_ch[rx_hdr->ch_id].status &=
  805. ~BAM_CH_REMOTE_OPEN;
  806. spin_unlock_irqrestore(
  807. &a2_mux_ctx->bam_ch[rx_hdr->ch_id].lock, flags);
  808. dev_kfree_skb_any(rx_skb);
  809. break;
  810. default:
  811. IPAERR("bad hdr.magic %x rvd %d cmd %d pad %d ch %d len %d\n",
  812. rx_hdr->magic_num, rx_hdr->reserved,
  813. rx_hdr->cmd, rx_hdr->pad_len, rx_hdr->ch_id,
  814. rx_hdr->pkt_len);
  815. dev_kfree_skb_any(rx_skb);
  816. return;
  817. }
  818. }
  819. static int a2_mux_write_cmd(void *data, u32 len)
  820. {
  821. int rc;
  822. struct tx_pkt_info *pkt;
  823. unsigned long flags;
  824. pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
  825. if (pkt == NULL) {
  826. IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
  827. return -ENOMEM;
  828. }
  829. pkt->skb = __dev_alloc_skb(len, GFP_NOWAIT | __GFP_NOWARN);
  830. if (pkt->skb == NULL) {
  831. IPAERR("%s: unable to alloc skb\n\n", __func__);
  832. kfree(pkt);
  833. return -ENOMEM;
  834. }
  835. memcpy(skb_put(pkt->skb, len), data, len);
  836. kfree(data);
  837. pkt->len = len;
  838. pkt->is_cmd = 1;
  839. set_tx_timestamp(pkt);
  840. spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
  841. list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
  842. rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, pkt->skb, NULL);
  843. if (rc) {
  844. IPAERR("%s ipa_tx_dp failed rc=%d\n",
  845. __func__, rc);
  846. list_del(&pkt->list_node);
  847. spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
  848. flags);
  849. dev_kfree_skb_any(pkt->skb);
  850. kfree(pkt);
  851. } else {
  852. spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
  853. flags);
  854. }
  855. return rc;
  856. }
  857. /**
  858. * a2_mux_get_tethered_client_handles() - provide the tethered
  859. * pipe handles for post setup configuration
  860. * @lcid: logical channel ID
  861. * @clnt_cons_handle: [out] consumer pipe handle
  862. * @clnt_prod_handle: [out] producer pipe handle
  863. *
  864. * Returns: 0 on success, negative on failure
  865. */
  866. int a2_mux_get_tethered_client_handles(enum a2_mux_logical_channel_id lcid,
  867. unsigned int *clnt_cons_handle,
  868. unsigned int *clnt_prod_handle)
  869. {
  870. if (!a2_mux_ctx->a2_mux_initialized || lcid != A2_MUX_TETHERED_0)
  871. return -ENODEV;
  872. if (!clnt_cons_handle || !clnt_prod_handle)
  873. return -EINVAL;
  874. *clnt_prod_handle = a2_mux_ctx->tethered_prod;
  875. *clnt_cons_handle = a2_mux_ctx->tethered_cons;
  876. return 0;
  877. }
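/*
 * Usage sketch (illustrative, not part of the original driver): a tethering
 * client could fetch the tethered pipe handles after the SW bridge is up.
 * cons_hdl and prod_hdl are hypothetical caller variables.
 *
 *	unsigned int cons_hdl, prod_hdl;
 *	int ret = a2_mux_get_tethered_client_handles(A2_MUX_TETHERED_0,
 *						     &cons_hdl, &prod_hdl);
 *	if (ret)
 *		pr_err("failed to get tethered handles: %d\n", ret);
 */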
  878. /**
  879. * a2_mux_write() - send the packet to A2,
  880. * adding a MUX header according to the provided lcid
  881. * @id: logical channel ID
  882. * @skb: SKB to write
  883. *
  884. * Returns: 0 on success, negative on failure
  885. */
  886. int a2_mux_write(enum a2_mux_logical_channel_id id, struct sk_buff *skb)
  887. {
  888. int rc = 0;
  889. struct bam_mux_hdr *hdr;
  890. unsigned long flags;
  891. struct sk_buff *new_skb = NULL;
  892. struct tx_pkt_info *pkt;
  893. bool is_connected;
  894. if (id >= A2_MUX_NUM_CHANNELS)
  895. return -EINVAL;
  896. if (!skb)
  897. return -EINVAL;
  898. if (!a2_mux_ctx->a2_mux_initialized)
  899. return -ENODEV;
  900. spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
  901. if (!bam_ch_is_open(id)) {
  902. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
  903. IPAERR("%s: port not open: %d\n",
  904. __func__,
  905. a2_mux_ctx->bam_ch[id].status);
  906. return -ENODEV;
  907. }
  908. if (a2_mux_ctx->bam_ch[id].use_wm &&
  909. (a2_mux_ctx->bam_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
  910. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
  911. IPAERR("%s: watermark exceeded: %d\n", __func__, id);
  912. return -EAGAIN;
  913. }
  914. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
  915. read_lock(&a2_mux_ctx->ul_wakeup_lock);
  916. is_connected = a2_mux_ctx->bam_is_connected &&
  917. !a2_mux_ctx->bam_connect_in_progress;
  918. read_unlock(&a2_mux_ctx->ul_wakeup_lock);
  919. if (!is_connected)
  920. return -ENODEV;
  921. if (id != A2_MUX_TETHERED_0) {
  922. /*
  923. * if the skb does not have enough tailroom for padding,
  924. * copy it into a new expanded skb
  925. */
  926. if ((skb->len & 0x3) &&
  927. (skb_tailroom(skb) < A2_MUX_PADDING_LENGTH(skb->len))) {
  928. new_skb = skb_copy_expand(skb, skb_headroom(skb),
  929. A2_MUX_PADDING_LENGTH(skb->len),
  930. GFP_ATOMIC);
  931. if (new_skb == NULL) {
  932. IPAERR("%s: cannot allocate skb\n", __func__);
  933. rc = -ENOMEM;
  934. goto write_fail;
  935. }
  936. dev_kfree_skb_any(skb);
  937. skb = new_skb;
  938. }
  939. hdr = (struct bam_mux_hdr *)skb_push(
  940. skb, sizeof(struct bam_mux_hdr));
  941. /*
  942. * the caller should have allocated room for the hdr and padding;
  943. * the hdr is fine, the padding is tricky
  944. */
  945. hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
  946. hdr->cmd = BAM_MUX_HDR_CMD_DATA;
  947. hdr->reserved = 0;
  948. hdr->ch_id = id;
  949. hdr->pkt_len = skb->len - sizeof(struct bam_mux_hdr);
  950. if (skb->len & 0x3)
  951. skb_put(skb, A2_MUX_PADDING_LENGTH(skb->len));
  952. hdr->pad_len = skb->len - (sizeof(struct bam_mux_hdr) +
  953. hdr->pkt_len);
  954. IPADBG("data %p, tail %p skb len %d pkt len %d pad len %d\n",
  955. skb->data, skb->tail, skb->len,
  956. hdr->pkt_len, hdr->pad_len);
  957. hdr->magic_num = htons(hdr->magic_num);
  958. hdr->pkt_len = htons(hdr->pkt_len);
  959. IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
  960. hdr->magic_num, hdr->pkt_len);
  961. }
  962. pkt = kmalloc(sizeof(struct tx_pkt_info), GFP_ATOMIC);
  963. if (pkt == NULL) {
  964. IPAERR("%s: mem alloc for tx_pkt_info failed\n", __func__);
  965. rc = -ENOMEM;
  966. goto write_fail2;
  967. }
  968. pkt->skb = skb;
  969. pkt->is_cmd = 0;
  970. set_tx_timestamp(pkt);
  971. spin_lock_irqsave(&a2_mux_ctx->bam_tx_pool_spinlock, flags);
  972. list_add_tail(&pkt->list_node, &a2_mux_ctx->bam_tx_pool);
  973. if (id == A2_MUX_TETHERED_0)
  974. rc = ipa_tx_dp(IPA_CLIENT_A2_TETHERED_CONS, skb, NULL);
  975. else
  976. rc = ipa_tx_dp(IPA_CLIENT_A2_EMBEDDED_CONS, skb, NULL);
  977. if (rc) {
  978. IPAERR("%s ipa_tx_dp failed rc=%d\n",
  979. __func__, rc);
  980. list_del(&pkt->list_node);
  981. spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
  982. flags);
  983. goto write_fail3;
  984. } else {
  985. spin_unlock_irqrestore(&a2_mux_ctx->bam_tx_pool_spinlock,
  986. flags);
  987. spin_lock_irqsave(&a2_mux_ctx->bam_ch[id].lock, flags);
  988. a2_mux_ctx->bam_ch[id].num_tx_pkts++;
  989. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[id].lock, flags);
  990. }
  991. return 0;
  992. write_fail3:
  993. kfree(pkt);
  994. write_fail2:
  995. if (new_skb)
  996. dev_kfree_skb_any(new_skb);
  997. write_fail:
  998. return rc;
  999. }
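/*
 * Usage sketch (illustrative, not part of the original driver): queueing a
 * packet on an embedded WWAN channel. On success the skb is handed back to
 * the client via the A2_MUX_WRITE_DONE notification; -EAGAIN means the high
 * watermark was hit and the caller should retry once the channel drains.
 *
 *	int ret = a2_mux_write(A2_MUX_WWAN_0, skb);
 *	if (ret == -EAGAIN)
 *		... channel is above HIGH_WATERMARK, retry later ...
 *	else if (ret)
 *		... handle the failure ...
 */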
  1000. /**
  1001. * a2_mux_add_hdr() - called when MUX header should
  1002. * be added
  1003. * @lcid: logical channel ID
  1004. *
  1005. * Returns: 0 on success, negative on failure
  1006. */
  1007. static int a2_mux_add_hdr(enum a2_mux_logical_channel_id lcid)
  1008. {
  1009. struct ipa_ioc_add_hdr *hdrs;
  1010. struct ipa_hdr_add *ipv4_hdr;
  1011. struct ipa_hdr_add *ipv6_hdr;
  1012. struct bam_mux_hdr *dmux_hdr;
  1013. int rc;
  1014. IPADBG("%s: ch %d\n", __func__, lcid);
  1015. if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
  1016. IPAERR("%s: non valid lcid passed: %d\n", __func__, lcid);
  1017. return -EINVAL;
  1018. }
  1019. hdrs = kzalloc(sizeof(struct ipa_ioc_add_hdr) +
  1020. 2 * sizeof(struct ipa_hdr_add), GFP_KERNEL);
  1021. if (!hdrs) {
  1022. IPAERR("%s: hdr allocation fail for ch %d\n", __func__, lcid);
  1023. return -ENOMEM;
  1024. }
  1025. ipv4_hdr = &hdrs->hdr[0];
  1026. ipv6_hdr = &hdrs->hdr[1];
  1027. dmux_hdr = (struct bam_mux_hdr *)ipv4_hdr->hdr;
  1028. snprintf(ipv4_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
  1029. A2_MUX_HDR_NAME_V4_PREF, lcid);
  1030. dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
  1031. dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
  1032. dmux_hdr->reserved = 0;
  1033. dmux_hdr->ch_id = lcid;
  1034. /* Packet length is added by IPA */
  1035. dmux_hdr->pkt_len = 0;
  1036. dmux_hdr->pad_len = 0;
  1037. dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
  1038. IPADBG("converted to network order magic_num=%d\n",
  1039. dmux_hdr->magic_num);
  1040. ipv4_hdr->hdr_len = sizeof(struct bam_mux_hdr);
  1041. ipv4_hdr->is_partial = 0;
  1042. dmux_hdr = (struct bam_mux_hdr *)ipv6_hdr->hdr;
  1043. snprintf(ipv6_hdr->name, IPA_RESOURCE_NAME_MAX, "%s%d",
  1044. A2_MUX_HDR_NAME_V6_PREF, lcid);
  1045. dmux_hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
  1046. dmux_hdr->cmd = BAM_MUX_HDR_CMD_DATA;
  1047. dmux_hdr->reserved = 0;
  1048. dmux_hdr->ch_id = lcid;
  1049. /* Packet length is added by IPA */
  1050. dmux_hdr->pkt_len = 0;
  1051. dmux_hdr->pad_len = 0;
  1052. dmux_hdr->magic_num = htons(dmux_hdr->magic_num);
  1053. IPADBG("converted to network order magic_num=%d\n",
  1054. dmux_hdr->magic_num);
  1055. ipv6_hdr->hdr_len = sizeof(struct bam_mux_hdr);
  1056. ipv6_hdr->is_partial = 0;
  1057. hdrs->commit = 1;
  1058. hdrs->num_hdrs = 2;
  1059. rc = ipa_add_hdr(hdrs);
  1060. if (rc) {
  1061. IPAERR("Fail on Header-Insertion(%d)\n", rc);
  1062. goto bail;
  1063. }
  1064. if (ipv4_hdr->status) {
  1065. IPAERR("Fail on Header-Insertion ipv4(%d)\n",
  1066. ipv4_hdr->status);
  1067. rc = ipv4_hdr->status;
  1068. goto bail;
  1069. }
  1070. if (ipv6_hdr->status) {
  1071. IPAERR("%s: Fail on Header-Insertion ipv4(%d)\n", __func__,
  1072. ipv6_hdr->status);
  1073. rc = ipv6_hdr->status;
  1074. goto bail;
  1075. }
  1076. a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = ipv4_hdr->hdr_hdl;
  1077. a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = ipv6_hdr->hdr_hdl;
  1078. rc = 0;
  1079. bail:
  1080. kfree(hdrs);
  1081. return rc;
  1082. }
  1083. /**
  1084. * a2_mux_del_hdr() - called when MUX header should
  1085. * be removed
  1086. * @lcid: logical channel ID
  1087. *
  1088. * Returns: 0 on success, negative on failure
  1089. */
  1090. static int a2_mux_del_hdr(enum a2_mux_logical_channel_id lcid)
  1091. {
  1092. struct ipa_ioc_del_hdr *hdrs;
  1093. struct ipa_hdr_del *ipv4_hdl;
  1094. struct ipa_hdr_del *ipv6_hdl;
  1095. int rc;
  1096. IPADBG("%s: ch %d\n", __func__, lcid);
  1097. if (lcid < A2_MUX_WWAN_0 || lcid > A2_MUX_WWAN_7) {
  1098. IPAERR("invalid lcid passed: %d\n", lcid);
  1099. return -EINVAL;
  1100. }
  1101. hdrs = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
  1102. 2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
  1103. if (!hdrs) {
  1104. IPAERR("hdr alloc fail for ch %d\n", lcid);
  1105. return -ENOMEM;
  1106. }
  1107. ipv4_hdl = &hdrs->hdl[0];
  1108. ipv6_hdl = &hdrs->hdl[1];
  1109. ipv4_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl;
  1110. ipv6_hdl->hdl = a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl;
  1111. hdrs->commit = 1;
  1112. hdrs->num_hdls = 2;
  1113. rc = ipa_del_hdr(hdrs);
  1114. if (rc) {
  1115. IPAERR("Fail on Del Header-Insertion(%d)\n", rc);
  1116. goto bail;
  1117. }
  1118. if (ipv4_hdl->status) {
  1119. IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
  1120. ipv4_hdl->status);
  1121. rc = ipv4_hdl->status;
  1122. goto bail;
  1123. }
  1124. a2_mux_ctx->bam_ch[lcid].v4_hdr_hdl = 0;
  1125. if (ipv6_hdl->status) {
  1126. IPAERR("Fail on Del Header-Insertion ipv4(%d)\n",
  1127. ipv6_hdl->status);
  1128. rc = ipv6_hdl->status;
  1129. goto bail;
  1130. }
  1131. a2_mux_ctx->bam_ch[lcid].v6_hdr_hdl = 0;
  1132. rc = 0;
  1133. bail:
  1134. kfree(hdrs);
  1135. return rc;
  1136. }
  1137. /**
  1138. * a2_mux_open_channel() - opens logical channel
  1139. * to A2
  1140. * @lcid: logical channel ID
  1141. * @user_data: user provided data for below CB
  1142. * @notify_cb: user provided notification CB
  1143. *
  1144. * Returns: 0 on success, negative on failure
  1145. */
  1146. int a2_mux_open_channel(enum a2_mux_logical_channel_id lcid,
  1147. void *user_data,
  1148. a2_mux_notify_cb notify_cb)
  1149. {
  1150. struct bam_mux_hdr *hdr;
  1151. unsigned long flags;
  1152. int rc = 0;
  1153. bool is_connected;
  1154. IPADBG("%s: opening ch %d\n", __func__, lcid);
  1155. if (!a2_mux_ctx->a2_mux_initialized) {
  1156. IPAERR("%s: not inititialized\n", __func__);
  1157. return -ENODEV;
  1158. }
  1159. if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0) {
  1160. IPAERR("%s: invalid channel id %d\n", __func__, lcid);
  1161. return -EINVAL;
  1162. }
  1163. if (notify_cb == NULL) {
  1164. IPAERR("%s: notify function is NULL\n", __func__);
  1165. return -EINVAL;
  1166. }
  1167. spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1168. if (bam_ch_is_open(lcid)) {
  1169. IPAERR("%s: Already opened %d\n", __func__, lcid);
  1170. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1171. goto open_done;
  1172. }
  1173. if (!bam_ch_is_remote_open(lcid)) {
  1174. IPAERR("%s: Remote not open; ch: %d\n", __func__, lcid);
  1175. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1176. return -ENODEV;
  1177. }
  1178. a2_mux_ctx->bam_ch[lcid].notify_cb = notify_cb;
  1179. a2_mux_ctx->bam_ch[lcid].user_data = user_data;
  1180. a2_mux_ctx->bam_ch[lcid].status |= BAM_CH_LOCAL_OPEN;
  1181. a2_mux_ctx->bam_ch[lcid].num_tx_pkts = 0;
  1182. a2_mux_ctx->bam_ch[lcid].use_wm = 0;
  1183. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1184. read_lock(&a2_mux_ctx->ul_wakeup_lock);
  1185. is_connected = a2_mux_ctx->bam_is_connected &&
  1186. !a2_mux_ctx->bam_connect_in_progress;
  1187. read_unlock(&a2_mux_ctx->ul_wakeup_lock);
  1188. if (!is_connected)
  1189. return -ENODEV;
  1190. if (lcid != A2_MUX_TETHERED_0) {
  1191. hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_KERNEL);
  1192. if (hdr == NULL) {
  1193. IPAERR("%s: hdr kmalloc failed. ch: %d\n",
  1194. __func__, lcid);
  1195. return -ENOMEM;
  1196. }
  1197. hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
  1198. if (a2_mux_ctx->a2_mux_apps_pc_enabled) {
  1199. hdr->cmd = BAM_MUX_HDR_CMD_OPEN;
  1200. } else {
  1201. IPAERR("%s: PC DISABLED BY A5 SW BY INTENTION\n",
  1202. __func__);
  1203. a2_mux_ctx->a2_pc_disabled = 1;
  1204. hdr->cmd = BAM_MUX_HDR_CMD_OPEN_NO_A2_PC;
  1205. }
  1206. hdr->reserved = 0;
  1207. hdr->ch_id = lcid;
  1208. hdr->pkt_len = 0;
  1209. hdr->pad_len = 0;
  1210. hdr->magic_num = htons(hdr->magic_num);
  1211. hdr->pkt_len = htons(hdr->pkt_len);
  1212. IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
  1213. hdr->magic_num, hdr->pkt_len);
  1214. rc = a2_mux_write_cmd((void *)hdr,
  1215. sizeof(struct bam_mux_hdr));
  1216. if (rc) {
  1217. IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
  1218. __func__, rc, lcid);
  1219. kfree(hdr);
  1220. return rc;
  1221. }
  1222. rc = a2_mux_add_hdr(lcid);
  1223. if (rc) {
  1224. IPAERR("a2_mux_add_hdr failed %d; ch: %d\n",
  1225. rc, lcid);
  1226. return rc;
  1227. }
  1228. }
  1229. open_done:
  1230. IPADBG("%s: opened ch %d\n", __func__, lcid);
  1231. return rc;
  1232. }
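/*
 * Usage sketch (illustrative, not part of the original driver): opening an
 * embedded WWAN channel once the remote OPEN command has been received.
 * my_priv and my_notify_cb are hypothetical client names; my_notify_cb
 * follows the a2_mux_notify_cb prototype and handles A2_MUX_RECEIVE and
 * A2_MUX_WRITE_DONE events.
 *
 *	ret = a2_mux_open_channel(A2_MUX_WWAN_0, my_priv, my_notify_cb);
 *	if (ret)
 *		pr_err("a2_mux_open_channel failed: %d\n", ret);
 */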
  1233. /**
  1234. * a2_mux_close_channel() - closes logical channel
  1235. * to A2
  1236. * @lcid: logical channel ID
  1237. *
  1238. * Returns: 0 on success, negative on failure
  1239. */
  1240. int a2_mux_close_channel(enum a2_mux_logical_channel_id lcid)
  1241. {
  1242. struct bam_mux_hdr *hdr;
  1243. unsigned long flags;
  1244. int rc = 0;
  1245. bool is_connected;
  1246. if (lcid >= A2_MUX_NUM_CHANNELS || lcid < 0)
  1247. return -EINVAL;
  1248. IPADBG("%s: closing ch %d\n", __func__, lcid);
  1249. if (!a2_mux_ctx->a2_mux_initialized)
  1250. return -ENODEV;
  1251. read_lock(&a2_mux_ctx->ul_wakeup_lock);
  1252. is_connected = a2_mux_ctx->bam_is_connected &&
  1253. !a2_mux_ctx->bam_connect_in_progress;
  1254. read_unlock(&a2_mux_ctx->ul_wakeup_lock);
  1255. if (!is_connected && !bam_ch_is_in_reset(lcid))
  1256. return -ENODEV;
  1257. spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1258. a2_mux_ctx->bam_ch[lcid].notify_cb = NULL;
  1259. a2_mux_ctx->bam_ch[lcid].user_data = NULL;
  1260. a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_LOCAL_OPEN;
  1261. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1262. if (bam_ch_is_in_reset(lcid)) {
  1263. a2_mux_ctx->bam_ch[lcid].status &= ~BAM_CH_IN_RESET;
  1264. return 0;
  1265. }
  1266. if (lcid != A2_MUX_TETHERED_0) {
  1267. hdr = kmalloc(sizeof(struct bam_mux_hdr), GFP_ATOMIC);
  1268. if (hdr == NULL) {
  1269. IPAERR("%s: hdr kmalloc failed. ch: %d\n",
  1270. __func__, lcid);
  1271. return -ENOMEM;
  1272. }
  1273. hdr->magic_num = BAM_MUX_HDR_MAGIC_NO;
  1274. hdr->cmd = BAM_MUX_HDR_CMD_CLOSE;
  1275. hdr->reserved = 0;
  1276. hdr->ch_id = lcid;
  1277. hdr->pkt_len = 0;
  1278. hdr->pad_len = 0;
  1279. hdr->magic_num = htons(hdr->magic_num);
  1280. hdr->pkt_len = htons(hdr->pkt_len);
  1281. IPADBG("convert to network order magic_num=%d, pkt_len=%d\n",
  1282. hdr->magic_num, hdr->pkt_len);
  1283. rc = a2_mux_write_cmd((void *)hdr, sizeof(struct bam_mux_hdr));
  1284. if (rc) {
  1285. IPAERR("%s: bam_mux_write_cmd failed %d; ch: %d\n",
  1286. __func__, rc, lcid);
  1287. kfree(hdr);
  1288. return rc;
  1289. }
  1290. rc = a2_mux_del_hdr(lcid);
  1291. if (rc) {
  1292. IPAERR("a2_mux_del_hdr failed %d; ch: %d\n",
  1293. rc, lcid);
  1294. return rc;
  1295. }
  1296. }
  1297. IPADBG("%s: closed ch %d\n", __func__, lcid);
  1298. return 0;
  1299. }
  1300. /**
  1301. * a2_mux_is_ch_full() - checks if channel is above predefined WM,
  1302. * used for flow control implementation
  1303. * @lcid: logical channel ID
  1304. *
  1305. * Returns: true if the channel is above predefined WM,
  1306. * false otherwise
  1307. */
  1308. int a2_mux_is_ch_full(enum a2_mux_logical_channel_id lcid)
  1309. {
  1310. unsigned long flags;
  1311. int ret;
  1312. if (lcid >= A2_MUX_NUM_CHANNELS ||
  1313. lcid < 0)
  1314. return -EINVAL;
  1315. if (!a2_mux_ctx->a2_mux_initialized)
  1316. return -ENODEV;
  1317. spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1318. a2_mux_ctx->bam_ch[lcid].use_wm = 1;
  1319. ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts >= HIGH_WATERMARK;
  1320. IPADBG("%s: ch %d num tx pkts=%d, HWM=%d\n", __func__,
  1321. lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
  1322. if (!bam_ch_is_local_open(lcid)) {
  1323. ret = -ENODEV;
  1324. IPAERR("%s: port not open: %d\n", __func__,
  1325. a2_mux_ctx->bam_ch[lcid].status);
  1326. }
  1327. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1328. return ret;
  1329. }
  1330. /**
  1331. * a2_mux_is_ch_low() - checks if channel is below predefined WM,
  1332. * used for flow control implementation
  1333. * @lcid: logical channel ID
  1334. *
  1335. * Returns: true if the channel is below predefined WM,
  1336. * false otherwise
  1337. */
  1338. int a2_mux_is_ch_low(enum a2_mux_logical_channel_id lcid)
  1339. {
  1340. unsigned long flags;
  1341. int ret;
  1342. if (lcid >= A2_MUX_NUM_CHANNELS ||
  1343. lcid < 0)
  1344. return -EINVAL;
  1345. if (!a2_mux_ctx->a2_mux_initialized)
  1346. return -ENODEV;
  1347. spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1348. a2_mux_ctx->bam_ch[lcid].use_wm = 1;
  1349. ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts <= LOW_WATERMARK;
  1350. IPADBG("%s: ch %d num tx pkts=%d, LWM=%d\n", __func__,
  1351. lcid, a2_mux_ctx->bam_ch[lcid].num_tx_pkts, ret);
  1352. if (!bam_ch_is_local_open(lcid)) {
  1353. ret = -ENODEV;
  1354. IPAERR("%s: port not open: %d\n", __func__,
  1355. a2_mux_ctx->bam_ch[lcid].status);
  1356. }
  1357. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1358. return ret;
  1359. }
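/*
 * Flow-control sketch (illustrative, not part of the original driver): a
 * netdev client could pair the two watermark queries to throttle its TX
 * queue; dev is a hypothetical net_device owned by that client.
 *
 *	if (a2_mux_is_ch_full(A2_MUX_WWAN_0) > 0)
 *		netif_stop_queue(dev);
 *	...
 *	if (a2_mux_is_ch_low(A2_MUX_WWAN_0) > 0)
 *		netif_wake_queue(dev);
 */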
  1360. /**
  1361. * a2_mux_is_ch_empty() - checks if channel is empty.
  1362. * @lcid: logical channel ID
  1363. *
  1364. * Returns: true if the channel is empty,
  1365. * false otherwise
  1366. */
  1367. int a2_mux_is_ch_empty(enum a2_mux_logical_channel_id lcid)
  1368. {
  1369. unsigned long flags;
  1370. int ret;
  1371. if (lcid >= A2_MUX_NUM_CHANNELS ||
  1372. lcid < 0)
  1373. return -EINVAL;
  1374. if (!a2_mux_ctx->a2_mux_initialized)
  1375. return -ENODEV;
  1376. spin_lock_irqsave(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1377. a2_mux_ctx->bam_ch[lcid].use_wm = 1;
  1378. ret = a2_mux_ctx->bam_ch[lcid].num_tx_pkts == 0;
  1379. if (!bam_ch_is_local_open(lcid)) {
  1380. ret = -ENODEV;
  1381. IPAERR("%s: port not open: %d\n", __func__,
  1382. a2_mux_ctx->bam_ch[lcid].status);
  1383. }
  1384. spin_unlock_irqrestore(&a2_mux_ctx->bam_ch[lcid].lock, flags);
  1385. return ret;
  1386. }
  1387. static int a2_mux_initialize_context(int handle)
  1388. {
  1389. int i;
  1390. a2_mux_ctx->a2_mux_apps_pc_enabled = 1;
  1391. a2_mux_ctx->a2_device_handle = handle;
  1392. INIT_WORK(&a2_mux_ctx->kickoff_ul_wakeup, kickoff_ul_wakeup_func);
  1393. INIT_WORK(&a2_mux_ctx->kickoff_ul_power_down,
  1394. kickoff_ul_power_down_func);
  1395. INIT_WORK(&a2_mux_ctx->kickoff_ul_request_resource,
  1396. kickoff_ul_request_resource_func);
  1397. INIT_LIST_HEAD(&a2_mux_ctx->bam_tx_pool);
  1398. spin_lock_init(&a2_mux_ctx->bam_tx_pool_spinlock);
  1399. mutex_init(&a2_mux_ctx->wakeup_lock);
  1400. rwlock_init(&a2_mux_ctx->ul_wakeup_lock);
  1401. spin_lock_init(&a2_mux_ctx->wakelock_reference_lock);
  1402. a2_mux_ctx->disconnect_ack = 1;
  1403. mutex_init(&a2_mux_ctx->smsm_cb_lock);
  1404. for (i = 0; i < A2_MUX_NUM_CHANNELS; ++i)
  1405. spin_lock_init(&a2_mux_ctx->bam_ch[i].lock);
  1406. init_completion(&a2_mux_ctx->ul_wakeup_ack_completion);
  1407. init_completion(&a2_mux_ctx->bam_connection_completion);
  1408. init_completion(&a2_mux_ctx->request_resource_completion);
  1409. init_completion(&a2_mux_ctx->dl_wakeup_completion);
  1410. wake_lock_init(&a2_mux_ctx->bam_wakelock,
  1411. WAKE_LOCK_SUSPEND, "a2_mux_wakelock");
  1412. a2_mux_ctx->a2_mux_initialized = 1;
  1413. a2_mux_ctx->a2_mux_send_power_vote_on_init_once = 1;
  1414. a2_mux_ctx->a2_mux_tx_workqueue =
  1415. create_singlethread_workqueue("a2_mux_tx");
  1416. if (!a2_mux_ctx->a2_mux_tx_workqueue) {
  1417. IPAERR("%s: a2_mux_tx_workqueue alloc failed\n",
  1418. __func__);
  1419. return -ENOMEM;
  1420. }
  1421. a2_mux_ctx->a2_mux_rx_workqueue =
  1422. create_singlethread_workqueue("a2_mux_rx");
  1423. if (!a2_mux_ctx->a2_mux_rx_workqueue) {
  1424. IPAERR("%s: a2_mux_rx_workqueue alloc failed\n",
  1425. __func__);
  1426. return -ENOMEM;
  1427. }
  1428. return 0;
  1429. }
  1430. /**
  1431. * a2_mux_init() - initialize A2 MUX component
  1432. *
  1433. * Returns: 0 on success, negative otherwise
  1434. */
  1435. int a2_mux_init(void)
  1436. {
  1437. int rc;
  1438. u32 h;
  1439. void *a2_virt_addr;
  1440. u32 a2_bam_mem_base;
  1441. u32 a2_bam_mem_size;
  1442. u32 a2_bam_irq;
  1443. struct sps_bam_props a2_props;
  1444. IPADBG("%s A2 MUX\n", __func__);
  1445. rc = ipa_get_a2_mux_bam_info(&a2_bam_mem_base,
  1446. &a2_bam_mem_size,
  1447. &a2_bam_irq);
  1448. if (rc) {
  1449. IPAERR("%s: ipa_get_a2_mux_bam_info failed\n", __func__);
  1450. rc = -EFAULT;
  1451. goto bail;
  1452. }
  1453. a2_virt_addr = ioremap_nocache((unsigned long)(a2_bam_mem_base),
  1454. a2_bam_mem_size);
  1455. if (!a2_virt_addr) {
  1456. IPAERR("%s: ioremap failed\n", __func__);
  1457. rc = -ENOMEM;
  1458. goto bail;
  1459. }
  1460. memset(&a2_props, 0, sizeof(a2_props));
  1461. a2_props.phys_addr = a2_bam_mem_base;
  1462. a2_props.virt_addr = a2_virt_addr;
  1463. a2_props.virt_size = a2_bam_mem_size;
  1464. a2_props.irq = a2_bam_irq;
  1465. a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
  1466. a2_props.num_pipes = A2_NUM_PIPES;
  1467. a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
  1468. a2_props.manage = SPS_BAM_MGR_DEVICE_REMOTE;
  1469. /* need to free on tear down */
  1470. rc = sps_register_bam_device(&a2_props, &h);
  1471. if (rc < 0) {
  1472. IPAERR("%s: register bam error %d\n", __func__, rc);
  1473. goto register_bam_failed;
  1474. }
  1475. a2_mux_ctx = kzalloc(sizeof(*a2_mux_ctx), GFP_KERNEL);
  1476. if (!a2_mux_ctx) {
  1477. IPAERR("%s: a2_mux_ctx alloc failed, rc: %d\n", __func__, rc);
  1478. rc = -ENOMEM;
  1479. goto register_bam_failed;
  1480. }
  1481. rc = a2_mux_initialize_context(h);
  1482. if (rc) {
  1483. IPAERR("%s: a2_mux_initialize_context failed, rc: %d\n",
  1484. __func__, rc);
  1485. goto ctx_alloc_failed;
  1486. }
  1487. rc = a2_mux_pm_initialize_rm();
  1488. if (rc) {
  1489. IPAERR("%s: a2_mux_pm_initialize_rm failed, rc: %d\n",
  1490. __func__, rc);
  1491. goto ctx_alloc_failed;
  1492. }
  1493. rc = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_A2_POWER_CONTROL,
  1494. a2_mux_smsm_cb, NULL);
  1495. if (rc) {
  1496. IPAERR("%s: smsm cb register failed, rc: %d\n", __func__, rc);
  1497. rc = -ENOMEM;
  1498. goto ctx_alloc_failed;
  1499. }
  1500. rc = smsm_state_cb_register(SMSM_MODEM_STATE,
  1501. SMSM_A2_POWER_CONTROL_ACK,
  1502. a2_mux_smsm_ack_cb, NULL);
  1503. if (rc) {
  1504. IPAERR("%s: smsm ack cb register failed, rc: %d\n",
  1505. __func__, rc);
  1506. rc = -ENOMEM;
  1507. goto smsm_ack_cb_reg_failed;
  1508. }
  1509. if (smsm_get_state(SMSM_MODEM_STATE) & SMSM_A2_POWER_CONTROL)
  1510. a2_mux_smsm_cb(NULL, 0, smsm_get_state(SMSM_MODEM_STATE));
  1511. /*
  1512. * Set remote channel open for tethered channel since there is
  1513. * no actual remote tethered channel
  1514. */
  1515. a2_mux_ctx->bam_ch[A2_MUX_TETHERED_0].status |= BAM_CH_REMOTE_OPEN;
  1516. rc = 0;
  1517. goto bail;
  1518. smsm_ack_cb_reg_failed:
  1519. smsm_state_cb_deregister(SMSM_MODEM_STATE,
  1520. SMSM_A2_POWER_CONTROL,
  1521. a2_mux_smsm_cb, NULL);
  1522. ctx_alloc_failed:
  1523. kfree(a2_mux_ctx);
  1524. register_bam_failed:
  1525. iounmap(a2_virt_addr);
  1526. bail:
  1527. return rc;
  1528. }
  1529. /**
  1530. * a2_mux_exit() - destroy A2 MUX component
  1531. *
  1532. * Returns: 0 on success, negative otherwise
  1533. */
  1534. int a2_mux_exit(void)
  1535. {
  1536. smsm_state_cb_deregister(SMSM_MODEM_STATE,
  1537. SMSM_A2_POWER_CONTROL_ACK,
  1538. a2_mux_smsm_ack_cb,
  1539. NULL);
  1540. smsm_state_cb_deregister(SMSM_MODEM_STATE,
  1541. SMSM_A2_POWER_CONTROL,
  1542. a2_mux_smsm_cb,
  1543. NULL);
  1544. if (a2_mux_ctx->a2_mux_tx_workqueue)
  1545. destroy_workqueue(a2_mux_ctx->a2_mux_tx_workqueue);
  1546. if (a2_mux_ctx->a2_mux_rx_workqueue)
  1547. destroy_workqueue(a2_mux_ctx->a2_mux_rx_workqueue);
  1548. return 0;
  1549. }