/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include "ipa_i.h"

#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
#define POLLING_MIN_SLEEP_RX 950
#define POLLING_MAX_SLEEP_RX 1050
#define POLLING_INACTIVITY_TX 40
#define POLLING_MIN_SLEEP_TX 400
#define POLLING_MAX_SLEEP_TX 500

static void replenish_rx_work_func(struct work_struct *work);
static struct delayed_work replenish_rx_work;
static void ipa_wq_handle_rx(struct work_struct *work);
static DECLARE_WORK(rx_work, ipa_wq_handle_rx);
static void ipa_wq_handle_tx(struct work_struct *work);
static DECLARE_WORK(tx_work, ipa_wq_handle_tx);
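
/*
 * Note added for clarity (not part of the original driver): both data paths
 * alternate between SPS interrupt mode and polling mode. An EOT interrupt
 * (ipa_sps_irq_rx_notify()/ipa_sps_irq_tx_notify()) reconfigures the pipe to
 * SPS_O_POLL and queues rx_work/tx_work; the work functions then drain the
 * pipe via ipa_handle_rx_core()/ipa_handle_tx_core() and, after
 * POLLING_INACTIVITY_RX/TX consecutive empty polls, switch the pipe back to
 * SPS_O_EOT interrupt mode.
 */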

/**
 * ipa_wq_write_done() - this function will be (eventually) called when a Tx
 * operation is complete
 * @work: work_struct used by the work queue
 *
 * Will be called in deferred context.
 * - invoke the callback supplied by the client who sent this command
 * - iterate over all packets and validate that
 *   the order for sent packets is the same as expected
 * - delete all the tx packet descriptors from the system
 *   pipe context (not needed anymore)
 * - return the tx buffer back to dma_pool
 */
void ipa_wq_write_done(struct work_struct *work)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
	unsigned long irq_flags;

	tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);

	if (unlikely(tx_pkt == NULL))
		WARN_ON(1);
	WARN_ON(tx_pkt->cnt != 1);

	spin_lock_irqsave(&tx_pkt->sys->spinlock, irq_flags);
	tx_pkt_expected = list_first_entry(&tx_pkt->sys->head_desc_list,
					   struct ipa_tx_pkt_wrapper,
					   link);
	if (unlikely(tx_pkt != tx_pkt_expected)) {
		spin_unlock_irqrestore(&tx_pkt->sys->spinlock,
				       irq_flags);
		WARN_ON(1);
	}
	list_del(&tx_pkt->link);
	spin_unlock_irqrestore(&tx_pkt->sys->spinlock, irq_flags);

	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
		dma_pool_free(ipa_ctx->dma_pool,
			      tx_pkt->bounce,
			      tx_pkt->mem.phys_base);
	} else {
		dma_unmap_single(NULL, tx_pkt->mem.phys_base,
				 tx_pkt->mem.size,
				 DMA_TO_DEVICE);
	}

	if (tx_pkt->callback)
		tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);

	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
}

int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
		       bool in_poll_state)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	struct sps_iovec iov;
	int ret;
	int cnt = 0;
	unsigned long irq_flags;

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
				!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
		if (ret) {
			IPAERR("sps_get_iovec failed %d\n", ret);
			break;
		}

		if (iov.addr == 0)
			break;

		if (unlikely(list_empty(&sys->head_desc_list)))
			continue;

		spin_lock_irqsave(&sys->spinlock, irq_flags);
		tx_pkt = list_first_entry(&sys->head_desc_list,
					  struct ipa_tx_pkt_wrapper, link);
		sys->len--;
		list_del(&tx_pkt->link);
		spin_unlock_irqrestore(&sys->spinlock, irq_flags);

		IPADBG("--curr_cnt=%d\n", sys->len);

		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
			dma_pool_free(ipa_ctx->dma_pool,
				      tx_pkt->bounce,
				      tx_pkt->mem.phys_base);
		else
			dma_unmap_single(NULL, tx_pkt->mem.phys_base,
					 tx_pkt->mem.size,
					 DMA_TO_DEVICE);

		if (tx_pkt->callback)
			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);

		if (tx_pkt->cnt > 1 && tx_pkt->cnt != IPA_LAST_DESC_CNT)
			dma_pool_free(ipa_ctx->dma_pool, tx_pkt->mult.base,
				      tx_pkt->mult.phys_base);

		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
		cnt++;
	}

	return cnt;
}

/**
 * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
 */
static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
{
	int ret;

	if (!atomic_read(&sys->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}

	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_get_config() failed %d\n", ret);
		goto fail;
	}

	sys->event.options = SPS_O_EOT;
	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
	if (ret) {
		IPAERR("sps_register_event() failed %d\n", ret);
		goto fail;
	}

	sys->ep->connect.options =
		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_set_config() failed %d\n", ret);
		goto fail;
	}

	atomic_set(&sys->curr_polling_state, 0);
	ipa_handle_tx_core(sys, true, false);
	return;

fail:
	IPA_STATS_INC_CNT(ipa_ctx->stats.x_intr_repost_tx);
	schedule_delayed_work(&sys->switch_to_intr_work, msecs_to_jiffies(1));
	return;
}

static void ipa_handle_tx(struct ipa_sys_context *sys)
{
	int inactive_cycles = 0;
	int cnt;

	ipa_inc_client_enable_clks();
	do {
		cnt = ipa_handle_tx_core(sys, true, true);
		if (cnt == 0) {
			inactive_cycles++;
			usleep_range(POLLING_MIN_SLEEP_TX,
				     POLLING_MAX_SLEEP_TX);
		} else {
			inactive_cycles = 0;
		}
	} while (inactive_cycles <= POLLING_INACTIVITY_TX);

	ipa_tx_switch_to_intr_mode(sys);
	ipa_dec_client_disable_clks();
}

static void ipa_wq_handle_tx(struct work_struct *work)
{
	ipa_handle_tx(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT]);
}

/**
 * ipa_send_one() - Send a single descriptor
 * @sys: system pipe context
 * @desc: descriptor to send
 * @in_atomic: whether caller is in atomic context
 *
 * - Allocate tx_packet wrapper
 * - Allocate a bounce buffer due to HW constraints
 *   (this buffer will be used for the DMA command)
 * - Copy the data (desc->pyld) to the bounce buffer
 * - transfer data to the IPA
 * - after the transfer is done, SPS will
 *   notify the sending user via ipa_sps_irq_comp_tx()
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
		 bool in_atomic)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	unsigned long irq_flags;
	int result;
	u16 sps_flags = SPS_IOVEC_FLAG_EOT;
	dma_addr_t dma_address;
	u16 len;
	u32 mem_flag = GFP_ATOMIC;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
	if (!tx_pkt) {
		IPAERR("failed to alloc tx wrapper\n");
		goto fail_mem_alloc;
	}

	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
		WARN_ON(desc->len > 512);

		/*
		 * Due to a HW limitation, we need to make sure that the packet
		 * does not cross a 1KB boundary
		 */
		tx_pkt->bounce = dma_pool_alloc(
					ipa_ctx->dma_pool,
					mem_flag, &dma_address);
		if (!tx_pkt->bounce) {
			dma_address = 0;
		} else {
			WARN_ON(!ipa_straddle_boundary
				((u32)dma_address,
				(u32)dma_address + desc->len - 1,
				1024));
			memcpy(tx_pkt->bounce, desc->pyld, desc->len);
		}
	} else {
		dma_address = dma_map_single(NULL, desc->pyld, desc->len,
					     DMA_TO_DEVICE);
	}
	if (!dma_address) {
		IPAERR("failed to DMA wrap\n");
		goto fail_dma_map;
	}

	INIT_LIST_HEAD(&tx_pkt->link);
	tx_pkt->type = desc->type;
	tx_pkt->cnt = 1; /* only 1 desc in this "set" */

	tx_pkt->mem.phys_base = dma_address;
	tx_pkt->mem.base = desc->pyld;
	tx_pkt->mem.size = desc->len;
	tx_pkt->sys = sys;
	tx_pkt->callback = desc->callback;
	tx_pkt->user1 = desc->user1;
	tx_pkt->user2 = desc->user2;

	/*
	 * Special treatment for immediate commands, where the structure of the
	 * descriptor is different
	 */
	if (desc->type == IPA_IMM_CMD_DESC) {
		sps_flags |= SPS_IOVEC_FLAG_IMME;
		len = desc->opcode;
		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
		       desc->opcode, desc->len, sps_flags);
		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
	} else {
		len = desc->len;
	}

	INIT_WORK(&tx_pkt->work, ipa_wq_write_done);

	spin_lock_irqsave(&sys->spinlock, irq_flags);
	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
				  sps_flags);
	if (result) {
		IPAERR("sps_transfer_one failed rc=%d\n", result);
		goto fail_sps_send;
	}
	spin_unlock_irqrestore(&sys->spinlock, irq_flags);

	return 0;

fail_sps_send:
	list_del(&tx_pkt->link);
	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
	if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
		dma_pool_free(ipa_ctx->dma_pool, tx_pkt->bounce,
			      dma_address);
	else
		dma_unmap_single(NULL, dma_address, desc->len, DMA_TO_DEVICE);
fail_dma_map:
	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
fail_mem_alloc:
	return -EFAULT;
}
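
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * driver). A caller with a data payload would typically fill an ipa_desc the
 * way ipa_tx_dp() does for the WLAN AMPDU pipe:
 *
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.pyld = skb->data;
 *	desc.len = skb->len;
 *	desc.type = IPA_DATA_DESC_SKB;
 *	desc.callback = my_write_done_cb;	// hypothetical client callback
 *	desc.user1 = skb;
 *	if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT], &desc, true))
 *		;	// treat as -EFAULT; the caller still owns skb on failure
 */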

/**
 * ipa_send() - Send multiple descriptors in one HW transaction
 * @sys: system pipe context
 * @num_desc: number of packets
 * @desc: packets to send (may be immediate command or data)
 * @in_atomic: whether caller is in atomic context
 *
 * This function is used for system-to-BAM connections.
 * - the SPS driver expects a struct sps_transfer which contains all the data
 *   for the transaction
 * - the sps_transfer struct points to bounce buffers for
 *   its DMA command (immediate command and data)
 * - an ipa_tx_pkt_wrapper is used for each IPA
 *   descriptor (allocated from the wrappers cache)
 * - the wrapper struct is configured for each ipa-desc payload and
 *   contains information which is later used by the user callbacks
 * - each transfer is made by calling sps_transfer()
 * - each packet (command or data) that is sent is also saved in the
 *   ipa_sys_context, to later check that all data was sent
 *
 * Return codes: 0: success, -EFAULT: failure
 */
int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
	     bool in_atomic)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;
	struct ipa_tx_pkt_wrapper *next_pkt;
	struct sps_transfer transfer = { 0 };
	struct sps_iovec *iovec;
	unsigned long irq_flags;
	dma_addr_t dma_addr;
	int i = 0;
	int j;
	int result;
	int fail_dma_wrap = 0;
	uint size = num_desc * sizeof(struct sps_iovec);
	u32 mem_flag = GFP_ATOMIC;

	if (unlikely(!in_atomic))
		mem_flag = GFP_KERNEL;

	transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag, &dma_addr);
	transfer.iovec_phys = dma_addr;
	transfer.iovec_count = num_desc;
	spin_lock_irqsave(&sys->spinlock, irq_flags);
	if (!transfer.iovec) {
		IPAERR("fail to alloc DMA mem for sps xfr buff\n");
		goto failure_coherent;
	}

	for (i = 0; i < num_desc; i++) {
		fail_dma_wrap = 0;
		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
					   mem_flag);
		if (!tx_pkt) {
			IPAERR("failed to alloc tx wrapper\n");
			goto failure;
		}
		/*
		 * first desc of set is "special" as it holds the count and
		 * other info
		 */
		if (i == 0) {
			transfer.user = tx_pkt;
			tx_pkt->mult.phys_base = dma_addr;
			tx_pkt->mult.base = transfer.iovec;
			tx_pkt->mult.size = size;
			tx_pkt->cnt = num_desc;
		}

		iovec = &transfer.iovec[i];
		iovec->flags = 0;

		INIT_LIST_HEAD(&tx_pkt->link);
		tx_pkt->type = desc[i].type;

		tx_pkt->mem.base = desc[i].pyld;
		tx_pkt->mem.size = desc[i].len;

		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0)) {
			WARN_ON(tx_pkt->mem.size > 512);

			/*
			 * Due to a HW limitation, we need to make sure that the
			 * packet does not cross a 1KB boundary
			 */
			tx_pkt->bounce =
				dma_pool_alloc(ipa_ctx->dma_pool,
					       mem_flag,
					       &tx_pkt->mem.phys_base);
			if (!tx_pkt->bounce) {
				tx_pkt->mem.phys_base = 0;
			} else {
				WARN_ON(!ipa_straddle_boundary(
					(u32)tx_pkt->mem.phys_base,
					(u32)tx_pkt->mem.phys_base +
					tx_pkt->mem.size - 1, 1024));
				memcpy(tx_pkt->bounce, tx_pkt->mem.base,
				       tx_pkt->mem.size);
			}
		} else {
			tx_pkt->mem.phys_base =
				dma_map_single(NULL, tx_pkt->mem.base,
					       tx_pkt->mem.size,
					       DMA_TO_DEVICE);
		}

		if (!tx_pkt->mem.phys_base) {
			IPAERR("failed to alloc tx wrapper\n");
			fail_dma_wrap = 1;
			goto failure;
		}

		tx_pkt->sys = sys;
		tx_pkt->callback = desc[i].callback;
		tx_pkt->user1 = desc[i].user1;
		tx_pkt->user2 = desc[i].user2;

		/*
		 * Point the iovec to the bounce buffer and
		 * add this packet to system pipe context.
		 */
		iovec->addr = tx_pkt->mem.phys_base;
		list_add_tail(&tx_pkt->link, &sys->head_desc_list);

		/*
		 * Special treatment for immediate commands, where the structure
		 * of the descriptor is different
		 */
		if (desc[i].type == IPA_IMM_CMD_DESC) {
			iovec->size = desc[i].opcode;
			iovec->flags |= SPS_IOVEC_FLAG_IMME;
		} else {
			iovec->size = desc[i].len;
		}

		if (i == (num_desc - 1)) {
			iovec->flags |= SPS_IOVEC_FLAG_EOT;
			/* "mark" the last desc */
			tx_pkt->cnt = IPA_LAST_DESC_CNT;
		}
	}

	result = sps_transfer(sys->ep->ep_hdl, &transfer);
	if (result) {
		IPAERR("sps_transfer failed rc=%d\n", result);
		goto failure;
	}

	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
	return 0;

failure:
	tx_pkt = transfer.user;
	for (j = 0; j < i; j++) {
		next_pkt = list_next_entry(tx_pkt, link);
		list_del(&tx_pkt->link);
		if (unlikely(ipa_ctx->ipa_hw_type == IPA_HW_v1_0))
			dma_pool_free(ipa_ctx->dma_pool,
				      tx_pkt->bounce,
				      tx_pkt->mem.phys_base);
		else
			dma_unmap_single(NULL, tx_pkt->mem.phys_base,
					 tx_pkt->mem.size,
					 DMA_TO_DEVICE);
		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
		tx_pkt = next_pkt;
	}
	if (i < num_desc)
		/* last desc failed */
		if (fail_dma_wrap)
			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
	if (transfer.iovec_phys)
		dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
			      transfer.iovec_phys);
failure_coherent:
	spin_unlock_irqrestore(&sys->spinlock, irq_flags);
	return -EFAULT;
}
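
/*
 * Note added for clarity (not part of the original driver): the wrapper of
 * the first descriptor in a set keeps the shared iovec array in tx_pkt->mult
 * and tx_pkt->cnt = num_desc, while the last wrapper is tagged with
 * IPA_LAST_DESC_CNT. ipa_handle_tx_core() uses these markers to free the
 * iovec dma_pool allocation exactly once per set.
 */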

/**
 * ipa_sps_irq_cmd_ack() - callback function which will be called by the SPS
 * driver after an immediate command is complete.
 * @user1: pointer to the descriptor of the transfer
 * @user2:
 *
 * Complete the immediate command's completion object; this will release the
 * thread which waits on this completion object (ipa_send_cmd())
 */
static void ipa_sps_irq_cmd_ack(void *user1, void *user2)
{
	struct ipa_desc *desc = (struct ipa_desc *)user1;

	if (!desc)
		WARN_ON(1);
	IPADBG("got ack for cmd=%d\n", desc->opcode);
	complete(&desc->xfer_done);
}

/**
 * ipa_send_cmd - send immediate commands
 * @num_desc: number of descriptors within the desc struct
 * @descr: descriptor structure
 *
 * Function will block till the command gets an ACK from the IPA HW; the
 * caller needs to free any resources it allocated after the function returns.
 * The callback in ipa_desc should not be set by the caller
 * for this function.
 */
int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
{
	struct ipa_desc *desc;
	int result = 0;

	ipa_inc_client_enable_clks();

	if (num_desc == 1) {
		init_completion(&descr->xfer_done);

		if (descr->callback || descr->user1)
			WARN_ON(1);

		descr->callback = ipa_sps_irq_cmd_ack;
		descr->user1 = descr;
		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_CMD], descr, false)) {
			IPAERR("fail to send immediate command\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&descr->xfer_done);
	} else {
		desc = &descr[num_desc - 1];
		init_completion(&desc->xfer_done);

		if (desc->callback || desc->user1)
			WARN_ON(1);

		desc->callback = ipa_sps_irq_cmd_ack;
		desc->user1 = desc;
		if (ipa_send(&ipa_ctx->sys[IPA_A5_CMD], num_desc,
			     descr, false)) {
			IPAERR("fail to send multiple immediate command set\n");
			result = -EFAULT;
			goto bail;
		}
		wait_for_completion(&desc->xfer_done);
	}

	IPA_STATS_INC_IC_CNT(num_desc, descr, ipa_ctx->stats.imm_cmds);
bail:
	ipa_dec_client_disable_clks();
	return result;
}
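
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * driver). A caller issuing a single immediate command fills an ipa_desc with
 * the command payload and leaves callback/user1 NULL, since ipa_send_cmd()
 * installs ipa_sps_irq_cmd_ack() itself:
 *
 *	struct ipa_ip_packet_init *cmd;	// any immediate-command payload
 *	struct ipa_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.opcode = IPA_IP_PACKET_INIT;
 *	desc.pyld = cmd;
 *	desc.len = sizeof(*cmd);
 *	desc.type = IPA_IMM_CMD_DESC;
 *	if (ipa_send_cmd(1, &desc))
 *		;	// command was not ACKed; caller frees cmd either way
 */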

/**
 * ipa_sps_irq_tx_notify() - Callback function which will be called by
 * the SPS driver to start a Tx poll operation.
 * Called in an interrupt context.
 * @notify: SPS driver supplied notification struct
 *
 * This function defers the work for this event to the tx workqueue.
 */
static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
{
	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_OUT];
	int ret;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		if (!atomic_read(&sys->curr_polling_state)) {
			ret = sps_get_config(sys->ep->ep_hdl,
					     &sys->ep->connect);
			if (ret) {
				IPAERR("sps_get_config() failed %d\n", ret);
				break;
			}
			sys->ep->connect.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(sys->ep->ep_hdl,
					     &sys->ep->connect);
			if (ret) {
				IPAERR("sps_set_config() failed %d\n", ret);
				break;
			}
			atomic_set(&sys->curr_polling_state, 1);
			queue_work(ipa_ctx->tx_wq, &tx_work);
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}

/**
 * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
 * the SPS driver after a Tx operation is complete.
 * Called in an interrupt context.
 * @notify: SPS driver supplied notification struct
 *
 * This function defers the work for this event to the tx workqueue.
 * The event will be later handled by ipa_wq_write_done().
 */
static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
{
	struct ipa_tx_pkt_wrapper *tx_pkt;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		tx_pkt = notify->data.transfer.user;
		schedule_work(&tx_pkt->work);
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}

/**
 * ipa_handle_rx_core() - The core functionality of packet reception. This
 * function is called from multiple code paths.
 *
 * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
 * endpoint. The function runs as long as there are packets in the pipe.
 * For each packet:
 * - Disconnect the packet from the system pipe linked list
 * - Unmap the packet's skb, making it non-DMAable
 * - Free the packet wrapper from the cache
 * - Prepare a proper skb
 * - Call the endpoint's notify function, passing the skb in the parameters
 * - Replenish the rx cache
 */
int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
		       bool in_poll_state)
{
	struct ipa_a5_mux_hdr *mux_hdr;
	struct ipa_rx_pkt_wrapper *rx_pkt;
	struct sk_buff *rx_skb;
	struct sps_iovec iov;
	unsigned int pull_len;
	unsigned int padding;
	int ret;
	struct ipa_ep_context *ep;
	int cnt = 0;
	unsigned int src_pipe;

	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
				!atomic_read(&sys->curr_polling_state))) {
		if (cnt && !process_all)
			break;

		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
		if (ret) {
			IPAERR("sps_get_iovec failed %d\n", ret);
			break;
		}

		if (iov.addr == 0)
			break;

		if (unlikely(list_empty(&sys->head_desc_list)))
			continue;

		rx_pkt = list_first_entry(&sys->head_desc_list,
					  struct ipa_rx_pkt_wrapper, link);

		rx_pkt->len = iov.size;
		sys->len--;
		list_del(&rx_pkt->link);

		IPADBG("--curr_cnt=%d\n", sys->len);

		rx_skb = rx_pkt->skb;
		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
				 DMA_FROM_DEVICE);

		/*
		 * make it look like a real skb, "data" was already set at
		 * alloc time
		 */
		rx_skb->tail = rx_skb->data + rx_pkt->len;
		rx_skb->len = rx_pkt->len;
		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);

		mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;

		src_pipe = mux_hdr->src_pipe_index;

		IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
		       rx_skb->len, ntohs(mux_hdr->interface_id),
		       src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata));

		IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);

		IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
		IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);

		/*
		 * Any packets arriving over AMPDU_TX should be dispatched
		 * to the regular WLAN RX data-path.
		 */
		if (unlikely(src_pipe == WLAN_AMPDU_TX_EP))
			src_pipe = WLAN_PROD_TX_EP;

		if (unlikely(src_pipe >= IPA_NUM_PIPES ||
			     !ipa_ctx->ep[src_pipe].valid ||
			     !ipa_ctx->ep[src_pipe].client_notify)) {
			IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
			       src_pipe, ipa_ctx->ep[src_pipe].valid,
			       ipa_ctx->ep[src_pipe].client_notify);
			dev_kfree_skb(rx_skb);
			ipa_replenish_rx_cache();
			++cnt;
			continue;
		}

		ep = &ipa_ctx->ep[src_pipe];
		pull_len = sizeof(struct ipa_a5_mux_hdr);

		/*
		 * IP packet starts on word boundary
		 * remove the MUX header and any padding and pass the frame to
		 * the client which registered a rx callback on the "src pipe"
		 */
		padding = ep->cfg.hdr.hdr_len & 0x3;
		if (padding)
			pull_len += 4 - padding;

		IPADBG("pulling %d bytes from skb\n", pull_len);
		skb_pull(rx_skb, pull_len);
		ipa_replenish_rx_cache();
		ep->client_notify(ep->priv, IPA_RECEIVE,
				  (unsigned long)(rx_skb));
		cnt++;
	}

	return cnt;
}
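
/*
 * Note added for clarity (not part of the original driver): every frame on
 * IPA_A5_LAN_WAN_IN is prefixed with a struct ipa_a5_mux_hdr. The code above
 * uses src_pipe_index to find the owning endpoint, strips the MUX header plus
 * the padding that keeps the IP payload word aligned (derived from the
 * endpoint's configured hdr_len), and hands the skb to that endpoint's
 * client_notify() callback.
 */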

/**
 * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
 */
static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
{
	int ret;

	if (!atomic_read(&sys->curr_polling_state)) {
		IPAERR("already in intr mode\n");
		goto fail;
	}

	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_get_config() failed %d\n", ret);
		goto fail;
	}

	sys->event.options = SPS_O_EOT;
	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
	if (ret) {
		IPAERR("sps_register_event() failed %d\n", ret);
		goto fail;
	}

	sys->ep->connect.options =
		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
	if (ret) {
		IPAERR("sps_set_config() failed %d\n", ret);
		goto fail;
	}

	atomic_set(&sys->curr_polling_state, 0);
	ipa_handle_rx_core(sys, true, false);
	return;

fail:
	IPA_STATS_INC_CNT(ipa_ctx->stats.x_intr_repost);
	schedule_delayed_work(&sys->switch_to_intr_work, msecs_to_jiffies(1));
}

/**
 * ipa_sps_irq_rx_notify() - Callback function which is called by the SPS
 * driver when a packet is received
 * @notify: SPS driver supplied notification information
 *
 * Called in an interrupt context, therefore the majority of the work is
 * deferred using a work queue.
 *
 * After receiving a packet, the driver goes to polling mode and keeps pulling
 * packets until the rx buffer is empty, then it goes back to interrupt mode.
 * This prevents the CPU from handling too many interrupts when the
 * throughput is high.
 */
static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
{
	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
	int ret;

	IPADBG("event %d notified\n", notify->event_id);

	switch (notify->event_id) {
	case SPS_EVENT_EOT:
		if (!atomic_read(&sys->curr_polling_state)) {
			ret = sps_get_config(sys->ep->ep_hdl,
					     &sys->ep->connect);
			if (ret) {
				IPAERR("sps_get_config() failed %d\n", ret);
				break;
			}
			sys->ep->connect.options = SPS_O_AUTO_ENABLE |
				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
			ret = sps_set_config(sys->ep->ep_hdl,
					     &sys->ep->connect);
			if (ret) {
				IPAERR("sps_set_config() failed %d\n", ret);
				break;
			}
			atomic_set(&sys->curr_polling_state, 1);
			queue_work(ipa_ctx->rx_wq, &rx_work);
		}
		break;
	default:
		IPAERR("received unexpected event id %d\n", notify->event_id);
	}
}

static void switch_to_intr_tx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
	ipa_handle_tx(sys);
}

/**
 * ipa_handle_rx() - handle packet reception. This function is executed in the
 * context of a work queue.
 * @work: work struct needed by the work queue
 *
 * ipa_handle_rx_core() is run in polling mode. After all packets have been
 * received, the driver switches back to interrupt mode.
 */
static void ipa_handle_rx(struct ipa_sys_context *sys)
{
	int inactive_cycles = 0;
	int cnt;

	ipa_inc_client_enable_clks();
	do {
		cnt = ipa_handle_rx_core(sys, true, true);
		if (cnt == 0) {
			inactive_cycles++;
			usleep_range(POLLING_MIN_SLEEP_RX,
				     POLLING_MAX_SLEEP_RX);
		} else {
			inactive_cycles = 0;
		}
	} while (inactive_cycles <= POLLING_INACTIVITY_RX);

	ipa_rx_switch_to_intr_mode(sys);
	ipa_dec_client_disable_clks();
}

static void switch_to_intr_rx_work_func(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct ipa_sys_context *sys;

	dwork = container_of(work, struct delayed_work, work);
	sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
	ipa_handle_rx(sys);
}

/**
 * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
 * IPA EP configuration
 * @sys_in:	[in] input needed to setup the BAM pipe and configure the EP
 * @clnt_hdl:	[out] client handle
 *
 * - configure the end-point registers with the supplied
 *   parameters from the user.
 * - call SPS APIs to create a system-to-BAM connection with IPA.
 * - allocate descriptor FIFO
 * - register callback function (ipa_sps_irq_rx_notify or
 *   ipa_sps_irq_tx_notify - depends on client type) in case the driver is
 *   not configured to polling mode
 *
 * Returns:	0 on success, negative on failure
 */
int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
{
	int ipa_ep_idx;
	int sys_idx = -1;
	int result = -EFAULT;
	dma_addr_t dma_addr;

	if (sys_in == NULL || clnt_hdl == NULL ||
	    sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
		IPAERR("bad parm.\n");
		result = -EINVAL;
		goto fail_bad_param;
	}

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, sys_in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("Invalid client.\n");
		goto fail_bad_param;
	}

	if (ipa_ctx->ep[ipa_ep_idx].valid == 1) {
		IPAERR("EP already allocated.\n");
		goto fail_bad_param;
	}

	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));

	ipa_ctx->ep[ipa_ep_idx].valid = 1;
	ipa_ctx->ep[ipa_ep_idx].client = sys_in->client;
	ipa_ctx->ep[ipa_ep_idx].client_notify = sys_in->notify;
	ipa_ctx->ep[ipa_ep_idx].priv = sys_in->priv;

	if (ipa_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
		IPAERR("fail to configure EP.\n");
		goto fail_sps_api;
	}

	/* Default Config */
	ipa_ctx->ep[ipa_ep_idx].ep_hdl = sps_alloc_endpoint();
	if (ipa_ctx->ep[ipa_ep_idx].ep_hdl == NULL) {
		IPAERR("SPS EP allocation failed.\n");
		goto fail_sps_api;
	}

	result = sps_get_config(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
				&ipa_ctx->ep[ipa_ep_idx].connect);
	if (result) {
		IPAERR("fail to get config.\n");
		goto fail_mem_alloc;
	}

	/* Specific Config */
	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_SRC;
		ipa_ctx->ep[ipa_ep_idx].connect.destination =
			SPS_DEV_HANDLE_MEM;
		ipa_ctx->ep[ipa_ep_idx].connect.source = ipa_ctx->bam_handle;
		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index =
			ipa_ctx->a5_pipe_index++;
		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index = ipa_ep_idx;
		ipa_ctx->ep[ipa_ep_idx].connect.options = SPS_O_ACK_TRANSFERS |
			SPS_O_NO_DISABLE;
	} else {
		ipa_ctx->ep[ipa_ep_idx].connect.mode = SPS_MODE_DEST;
		ipa_ctx->ep[ipa_ep_idx].connect.source = SPS_DEV_HANDLE_MEM;
		ipa_ctx->ep[ipa_ep_idx].connect.destination =
			ipa_ctx->bam_handle;
		ipa_ctx->ep[ipa_ep_idx].connect.src_pipe_index =
			ipa_ctx->a5_pipe_index++;
		ipa_ctx->ep[ipa_ep_idx].connect.dest_pipe_index = ipa_ep_idx;
		if (sys_in->client == IPA_CLIENT_A5_LAN_WAN_PROD)
			ipa_ctx->ep[ipa_ep_idx].connect.options |=
				SPS_O_ACK_TRANSFERS;
	}

	ipa_ctx->ep[ipa_ep_idx].connect.options |= (SPS_O_AUTO_ENABLE |
		SPS_O_EOT);
	if (ipa_ctx->polling_mode)
		ipa_ctx->ep[ipa_ep_idx].connect.options |= SPS_O_POLL;

	ipa_ctx->ep[ipa_ep_idx].connect.desc.size = sys_in->desc_fifo_sz;
	ipa_ctx->ep[ipa_ep_idx].connect.desc.base =
		dma_alloc_coherent(NULL,
				   ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
				   &dma_addr, 0);
	ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base = dma_addr;
	if (ipa_ctx->ep[ipa_ep_idx].connect.desc.base == NULL) {
		IPAERR("fail to get DMA desc memory.\n");
		goto fail_mem_alloc;
	}

	ipa_ctx->ep[ipa_ep_idx].connect.event_thresh = IPA_EVENT_THRESHOLD;

	result = sps_connect(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
			     &ipa_ctx->ep[ipa_ep_idx].connect);
	if (result) {
		IPAERR("sps_connect fails.\n");
		goto fail_sps_connect;
	}

	switch (ipa_ep_idx) {
	case 1:
		sys_idx = ipa_ep_idx;
		break;
	case 2:
		sys_idx = ipa_ep_idx;
		INIT_DELAYED_WORK(&ipa_ctx->sys[sys_idx].switch_to_intr_work,
				  switch_to_intr_tx_work_func);
		break;
	case 3:
		sys_idx = ipa_ep_idx;
		INIT_DELAYED_WORK(&replenish_rx_work, replenish_rx_work_func);
		INIT_DELAYED_WORK(&ipa_ctx->sys[sys_idx].switch_to_intr_work,
				  switch_to_intr_rx_work_func);
		break;
	case WLAN_AMPDU_TX_EP:
		sys_idx = IPA_A5_WLAN_AMPDU_OUT;
		break;
	default:
		IPAERR("Invalid EP index.\n");
		result = -EFAULT;
		goto fail_register_event;
	}

	if (!ipa_ctx->polling_mode) {
		ipa_ctx->sys[sys_idx].event.options = SPS_O_EOT;
		ipa_ctx->sys[sys_idx].event.mode = SPS_TRIGGER_CALLBACK;
		ipa_ctx->sys[sys_idx].event.xfer_done = NULL;
		ipa_ctx->sys[sys_idx].event.user =
			&ipa_ctx->sys[sys_idx];
		ipa_ctx->sys[sys_idx].event.callback =
				IPA_CLIENT_IS_CONS(sys_in->client) ?
					ipa_sps_irq_rx_notify :
					(sys_in->client ==
					 IPA_CLIENT_A5_LAN_WAN_PROD ?
						ipa_sps_irq_tx_notify :
						ipa_sps_irq_tx_no_aggr_notify);
		result = sps_register_event(ipa_ctx->ep[ipa_ep_idx].ep_hdl,
					    &ipa_ctx->sys[sys_idx].event);
		if (result < 0) {
			IPAERR("register event error %d\n", result);
			goto fail_register_event;
		}
	}

	*clnt_hdl = ipa_ep_idx;
	IPADBG("client %d (ep: %d) connected\n", sys_in->client, ipa_ep_idx);

	return 0;

fail_register_event:
	sps_disconnect(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
fail_sps_connect:
	dma_free_coherent(NULL, ipa_ctx->ep[ipa_ep_idx].connect.desc.size,
			  ipa_ctx->ep[ipa_ep_idx].connect.desc.base,
			  ipa_ctx->ep[ipa_ep_idx].connect.desc.phys_base);
fail_mem_alloc:
	sps_free_endpoint(ipa_ctx->ep[ipa_ep_idx].ep_hdl);
fail_sps_api:
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail_bad_param:
	return result;
}
EXPORT_SYMBOL(ipa_setup_sys_pipe);
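
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * driver). A client would typically fill ipa_sys_connect_params and keep the
 * returned handle for teardown; the FIFO size below is a hypothetical value:
 *
 *	struct ipa_sys_connect_params sys_in;
 *	u32 clnt_hdl;
 *
 *	memset(&sys_in, 0, sizeof(sys_in));
 *	sys_in.client = IPA_CLIENT_A5_LAN_WAN_PROD;
 *	sys_in.desc_fifo_sz = 0x800;	// hypothetical FIFO size
 *	sys_in.notify = my_notify_cb;	// hypothetical client callback
 *	sys_in.priv = my_ctx;		// hypothetical private data
 *	if (ipa_setup_sys_pipe(&sys_in, &clnt_hdl))
 *		return;			// failed, nothing to tear down
 *	...
 *	ipa_teardown_sys_pipe(clnt_hdl);
 */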

/**
 * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
 * @clnt_hdl:	[in] the handle obtained from ipa_setup_sys_pipe
 *
 * Returns:	0 on success, negative on failure
 */
int ipa_teardown_sys_pipe(u32 clnt_hdl)
{
	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	sps_disconnect(ipa_ctx->ep[clnt_hdl].ep_hdl);
	dma_free_coherent(NULL, ipa_ctx->ep[clnt_hdl].connect.desc.size,
			  ipa_ctx->ep[clnt_hdl].connect.desc.base,
			  ipa_ctx->ep[clnt_hdl].connect.desc.phys_base);
	sps_free_endpoint(ipa_ctx->ep[clnt_hdl].ep_hdl);
	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}
EXPORT_SYMBOL(ipa_teardown_sys_pipe);

/**
 * ipa_tx_comp_usr_notify_release() - Callback function which will call the
 * user supplied callback function to release the skb, or release it on
 * its own if no callback function was supplied.
 * @user1: [in] the skb to release
 * @user2: [in] the IPA endpoint index of the destination client
 *
 * This notify callback is for the destination client.
 * This function is supplied in ipa_connect.
 */
static void ipa_tx_comp_usr_notify_release(void *user1, void *user2)
{
	struct sk_buff *skb = (struct sk_buff *)user1;
	u32 ep_idx = (u32)user2;

	IPADBG("skb=%p ep=%d\n", skb, ep_idx);

	IPA_STATS_INC_TX_CNT(ep_idx, ipa_ctx->stats.tx_sw_pkts,
			     ipa_ctx->stats.tx_hw_pkts);

	if (ipa_ctx->ep[ep_idx].client_notify)
		ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
				IPA_WRITE_DONE, (unsigned long)skb);
	else
		dev_kfree_skb(skb);
}

static void ipa_tx_cmd_comp(void *user1, void *user2)
{
	kfree(user1);
}

/**
 * ipa_tx_dp() - Data-path tx handler
 * @dst:	[in] which IPA destination to route tx packets to
 * @skb:	[in] the packet to send
 * @meta:	[in] TX packet meta-data
 *
 * Data-path tx handler, this is used for both the SW data-path which by-passes
 * most IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only.
 * If dst is a "valid" CONS type, then the SW data-path is used. If dst is the
 * WLAN_AMPDU PROD type, then the HW data-path for WLAN AMPDU is used. Anything
 * else is an error. For errors, the client needs to free the skb as needed.
 * For success, the IPA driver will later invoke the client callback if one was
 * supplied; that callback should free the skb. If no callback is supplied, the
 * IPA driver will free the skb internally.
 *
 * The function will use two descriptors for this send command
 * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent);
 * the first descriptor will be used to inform the IPA hardware that
 * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
 * Once this send is done from the SPS point-of-view, the IPA driver will
 * get notified by the supplied callback - ipa_sps_irq_tx_comp()
 *
 * ipa_sps_irq_tx_comp will call the user supplied
 * callback (from ipa_connect)
 *
 * Returns:	0 on success, negative on failure
 */
int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
	      struct ipa_tx_meta *meta)
{
	struct ipa_desc desc[2];
	int ipa_ep_idx;
	struct ipa_ip_packet_init *cmd;

	memset(&desc, 0, 2 * sizeof(struct ipa_desc));

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, dst);
	if (unlikely(ipa_ep_idx == -1)) {
		IPAERR("dest EP does not exist.\n");
		goto fail_gen;
	}

	if (unlikely(ipa_ctx->ep[ipa_ep_idx].valid == 0)) {
		IPAERR("dest EP not valid.\n");
		goto fail_gen;
	}

	if (IPA_CLIENT_IS_CONS(dst)) {
		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
		if (!cmd) {
			IPAERR("failed to alloc immediate command object\n");
			goto fail_mem_alloc;
		}

		cmd->destination_pipe_index = ipa_ep_idx;
		if (meta && meta->mbim_stream_id_valid)
			cmd->metadata = meta->mbim_stream_id;

		desc[0].opcode = IPA_IP_PACKET_INIT;
		desc[0].pyld = cmd;
		desc[0].len = sizeof(struct ipa_ip_packet_init);
		desc[0].type = IPA_IMM_CMD_DESC;
		desc[0].callback = ipa_tx_cmd_comp;
		desc[0].user1 = cmd;
		desc[1].pyld = skb->data;
		desc[1].len = skb->len;
		desc[1].type = IPA_DATA_DESC_SKB;
		desc[1].callback = ipa_tx_comp_usr_notify_release;
		desc[1].user1 = skb;
		desc[1].user2 = (void *)ipa_ep_idx;

		if (ipa_send(&ipa_ctx->sys[IPA_A5_LAN_WAN_OUT], 2, desc,
			     true)) {
			IPAERR("fail to send immediate command\n");
			goto fail_send;
		}
		IPA_STATS_INC_CNT(ipa_ctx->stats.imm_cmds[IPA_IP_PACKET_INIT]);
	} else if (dst == IPA_CLIENT_A5_WLAN_AMPDU_PROD) {
		desc[0].pyld = skb->data;
		desc[0].len = skb->len;
		desc[0].type = IPA_DATA_DESC_SKB;
		desc[0].callback = ipa_tx_comp_usr_notify_release;
		desc[0].user1 = skb;
		desc[0].user2 = (void *)ipa_ep_idx;

		if (ipa_send_one(&ipa_ctx->sys[IPA_A5_WLAN_AMPDU_OUT],
				 &desc[0], true)) {
			IPAERR("fail to send skb\n");
			goto fail_gen;
		}
	} else {
		IPAERR("%d PROD is not supported.\n", dst);
		goto fail_gen;
	}

	return 0;

fail_send:
	kfree(cmd);
fail_mem_alloc:
fail_gen:
	return -EFAULT;
}
EXPORT_SYMBOL(ipa_tx_dp);
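
/*
 * Illustrative usage sketch (added for clarity, not part of the original
 * driver). A client pushing a packet toward a CONS-type destination (the
 * exact enum ipa_client_type value depends on the platform configuration)
 * would do something like:
 *
 *	if (ipa_tx_dp(dst_cons_client, skb, NULL)) {
 *		// send failed, the caller still owns and must free skb
 *		dev_kfree_skb_any(skb);
 *	}
 *	// on success the skb is released via ipa_tx_comp_usr_notify_release()
 */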

static void ipa_wq_handle_rx(struct work_struct *work)
{
	ipa_handle_rx(&ipa_ctx->sys[IPA_A5_LAN_WAN_IN]);
}

/**
 * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
 *
 * The function allocates buffers in the rx_pkt_wrapper_cache cache until there
 * are IPA_RX_POOL_CEIL buffers in the cache.
 * - Allocate a buffer in the cache
 * - Initialize the packet's link
 * - Initialize the packet's work struct
 * - Allocate the packet's socket buffer (skb)
 * - Fill the packet's skb with data
 * - Make the packet DMAable
 * - Add the packet to the system pipe linked list
 * - Initiate a SPS transfer so that the SPS driver will use this packet later.
 */
void ipa_replenish_rx_cache(void)
{
	void *ptr;
	struct ipa_rx_pkt_wrapper *rx_pkt;
	int ret;
	int rx_len_cached = 0;
	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];
	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;

	rx_len_cached = sys->len;

	while (rx_len_cached < IPA_RX_POOL_CEIL) {
		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
					   flag);
		if (!rx_pkt) {
			IPAERR("failed to alloc rx wrapper\n");
			goto fail_kmem_cache_alloc;
		}

		INIT_LIST_HEAD(&rx_pkt->link);

		rx_pkt->skb = __dev_alloc_skb(IPA_RX_SKB_SIZE, flag);
		if (rx_pkt->skb == NULL) {
			IPAERR("failed to alloc skb\n");
			goto fail_skb_alloc;
		}
		ptr = skb_put(rx_pkt->skb, IPA_RX_SKB_SIZE);
		rx_pkt->dma_address = dma_map_single(NULL, ptr,
						     IPA_RX_SKB_SIZE,
						     DMA_FROM_DEVICE);
		if (rx_pkt->dma_address == 0 || rx_pkt->dma_address == ~0) {
			IPAERR("dma_map_single failure %p for %p\n",
			       (void *)rx_pkt->dma_address, ptr);
			goto fail_dma_mapping;
		}

		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
		rx_len_cached = ++sys->len;

		ret = sps_transfer_one(sys->ep->ep_hdl, rx_pkt->dma_address,
				       IPA_RX_SKB_SIZE, rx_pkt,
				       0);
		if (ret) {
			IPAERR("sps_transfer_one failed %d\n", ret);
			goto fail_sps_transfer;
		}
	}

	ipa_ctx->stats.rx_q_len = sys->len;

	return;

fail_sps_transfer:
	list_del(&rx_pkt->link);
	rx_len_cached = --sys->len;
	dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
			 DMA_FROM_DEVICE);
fail_dma_mapping:
	dev_kfree_skb(rx_pkt->skb);
fail_skb_alloc:
	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
fail_kmem_cache_alloc:
	if (rx_len_cached == 0) {
		IPA_STATS_INC_CNT(ipa_ctx->stats.rx_repl_repost);
		schedule_delayed_work(&replenish_rx_work,
				      msecs_to_jiffies(100));
	}
	ipa_ctx->stats.rx_q_len = sys->len;

	return;
}
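
/*
 * Note added for clarity (not part of the original driver): if replenishment
 * fails while the Rx ring is completely empty (rx_len_cached == 0), the Rx
 * path would stall with no posted buffers, so replenish_rx_work is
 * rescheduled to retry after 100 ms.
 */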

static void replenish_rx_work_func(struct work_struct *work)
{
	ipa_replenish_rx_cache();
}

/**
 * ipa_cleanup_rx() - release RX queue resources
 *
 */
void ipa_cleanup_rx(void)
{
	struct ipa_rx_pkt_wrapper *rx_pkt;
	struct ipa_rx_pkt_wrapper *r;
	struct ipa_sys_context *sys = &ipa_ctx->sys[IPA_A5_LAN_WAN_IN];

	list_for_each_entry_safe(rx_pkt, r,
				 &sys->head_desc_list, link) {
		list_del(&rx_pkt->link);
		dma_unmap_single(NULL, rx_pkt->dma_address, IPA_RX_SKB_SIZE,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(rx_pkt->skb);
		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
	}
}