/*==========================================================================
 * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_ddma.c $
 * $Revision: #10 $
 * $Date: 2011/10/20 $
 * $Change: 1869464 $
 *
 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
 * otherwise expressly agreed to in writing between Synopsys and you.
 *
 * The Software IS NOT an item of Licensed Software or Licensed Product under
 * any End User Software License Agreement or Agreement for Licensed Product
 * with Synopsys or any supplement thereto. You are permitted to use and
 * redistribute this Software in source and binary forms, with or without
 * modification, provided that redistributions of source code must retain this
 * notice. You may not view, use, disclose, copy or distribute this file or
 * any information contained herein except pursuant to this license grant from
 * Synopsys. If you do not agree with this notice, including the disclaimer
 * below, then you are not authorized to use the Software.
 *
 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 * ========================================================================== */
#ifndef DWC_DEVICE_ONLY

/** @file
 * This file contains Descriptor DMA support implementation for host mode.
 */

#include "dwc_otg_hcd.h"
#include "dwc_otg_regs.h"
static inline uint8_t frame_list_idx(uint16_t frame)
{
        return (frame & (MAX_FRLIST_EN_NUM - 1));
}

static inline uint16_t desclist_idx_inc(uint16_t idx, uint16_t inc, uint8_t speed)
{
        return (idx + inc) &
            (((speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
              MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static inline uint16_t desclist_idx_dec(uint16_t idx, uint16_t inc, uint8_t speed)
{
        return (idx - inc) &
            (((speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_DMA_DESC_NUM_HS_ISOC :
              MAX_DMA_DESC_NUM_GENERIC) - 1);
}
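
/*
 * The index helpers above rely on the descriptor list sizes being powers
 * of two, so that "& (size - 1)" implements a cheap modulo. A worked
 * example, assuming MAX_DMA_DESC_NUM_HS_ISOC == 256 (a typical value;
 * see dwc_otg_hcd.h for the configured one):
 *
 *      desclist_idx_inc(254, 4, DWC_OTG_EP_SPEED_HIGH)
 *              == (254 + 4) & 255 == 2        (wraps past the list end)
 */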
static inline uint16_t max_desc_num(dwc_otg_qh_t * qh)
{
        return (((qh->ep_type == UE_ISOCHRONOUS)
                 && (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH))
                ? MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC);
}

static inline uint16_t frame_incr_val(dwc_otg_qh_t * qh)
{
        return ((qh->dev_speed == DWC_OTG_EP_SPEED_HIGH)
                ? ((qh->interval + 8 - 1) / 8)
                : qh->interval);
}
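
/*
 * frame_incr_val() converts the servicing period into full-frame units.
 * For HS endpoints qh->interval is in micro-frames (8 per frame), so the
 * value is rounded up to whole frames; for FS/LS it is already in frames.
 * For example, an HS interval of 20 micro-frames yields
 * (20 + 8 - 1) / 8 = 3 frames between FrameList entries.
 */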
static int desc_list_alloc(dwc_otg_qh_t * qh)
{
        int retval = 0;

        qh->desc_list = (dwc_otg_host_dma_desc_t *)
            DWC_DMA_ALLOC(sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
                          &qh->desc_list_dma);

        if (!qh->desc_list) {
                DWC_ERROR("%s: DMA descriptor list allocation failed\n",
                          __func__);
                /* Bail out before touching the (NULL) list below. */
                return -DWC_E_NO_MEMORY;
        }

        dwc_memset(qh->desc_list, 0x00,
                   sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));

        qh->n_bytes =
            (uint32_t *) DWC_ALLOC(sizeof(uint32_t) * max_desc_num(qh));

        if (!qh->n_bytes) {
                retval = -DWC_E_NO_MEMORY;
                DWC_ERROR
                    ("%s: Failed to allocate array of actual descriptor sizes\n",
                     __func__);
        }

        return retval;
}
static void desc_list_free(dwc_otg_qh_t * qh)
{
        if (qh->desc_list) {
                /* Free with the same byte size the list was allocated with. */
                DWC_DMA_FREE(sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh),
                             qh->desc_list, qh->desc_list_dma);
                qh->desc_list = NULL;
        }

        if (qh->n_bytes) {
                DWC_FREE(qh->n_bytes);
                qh->n_bytes = NULL;
        }
}
static int frame_list_alloc(dwc_otg_hcd_t * hcd)
{
        if (hcd->frame_list)
                return 0;

        hcd->frame_list = DWC_DMA_ALLOC(4 * MAX_FRLIST_EN_NUM,
                                        &hcd->frame_list_dma);
        if (!hcd->frame_list) {
                DWC_ERROR("%s: Frame List allocation failed\n", __func__);
                /* Bail out before clearing the (NULL) list below. */
                return -DWC_E_NO_MEMORY;
        }

        dwc_memset(hcd->frame_list, 0x00, 4 * MAX_FRLIST_EN_NUM);

        return 0;
}
static void frame_list_free(dwc_otg_hcd_t * hcd)
{
        if (!hcd->frame_list)
                return;

        DWC_DMA_FREE(4 * MAX_FRLIST_EN_NUM, hcd->frame_list,
                     hcd->frame_list_dma);
        hcd->frame_list = NULL;
}
static void per_sched_enable(dwc_otg_hcd_t * hcd, uint16_t fr_list_en)
{
        hcfg_data_t hcfg;

        hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
        if (hcfg.b.perschedena) {
                /* already enabled */
                return;
        }

        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hflbaddr,
                        hcd->frame_list_dma);

        switch (fr_list_en) {
        case 64:
                hcfg.b.frlisten = 3;
                break;
        case 32:
                hcfg.b.frlisten = 2;
                break;
        case 16:
                hcfg.b.frlisten = 1;
                break;
        case 8:
                hcfg.b.frlisten = 0;
                break;
        default:
                break;
        }

        hcfg.b.perschedena = 1;

        DWC_DEBUGPL(DBG_HCD, "Enabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}
static void per_sched_disable(dwc_otg_hcd_t * hcd)
{
        hcfg_data_t hcfg;

        hcfg.d32 = DWC_READ_REG32(&hcd->core_if->host_if->host_global_regs->hcfg);
        if (!hcfg.b.perschedena) {
                /* already disabled */
                return;
        }

        hcfg.b.perschedena = 0;

        DWC_DEBUGPL(DBG_HCD, "Disabling Periodic schedule\n");
        DWC_WRITE_REG32(&hcd->core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}
/*
 * Activates/Deactivates FrameList entries for the channel
 * based on the endpoint servicing period.
 */
void update_frame_list(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh, uint8_t enable)
{
        uint16_t i, j, inc;
        dwc_hc_t *hc = NULL;

        if (!qh->channel) {
                DWC_ERROR("qh->channel = %p", qh->channel);
                return;
        }

        if (!hcd) {
                DWC_ERROR("hcd = %p", hcd);
                return;
        }

        if (!hcd->frame_list) {
                DWC_ERROR("hcd->frame_list = %p", hcd->frame_list);
                return;
        }

        hc = qh->channel;
        inc = frame_incr_val(qh);
        if (qh->ep_type == UE_ISOCHRONOUS)
                i = frame_list_idx(qh->sched_frame);
        else
                i = 0;
        j = i;
        do {
                if (enable)
                        hcd->frame_list[j] |= (1 << hc->hc_num);
                else
                        hcd->frame_list[j] &= ~(1 << hc->hc_num);
                j = (j + inc) & (MAX_FRLIST_EN_NUM - 1);
        } while (j != i);

        if (!enable)
                return;

        hc->schinfo = 0;
        if (qh->channel->speed == DWC_OTG_EP_SPEED_HIGH) {
                j = 1;
                /* TODO - check this */
                inc = (8 + qh->interval - 1) / qh->interval;
                for (i = 0; i < inc; i++) {
                        hc->schinfo |= j;
                        j = j << qh->interval;
                }
        } else {
                hc->schinfo = 0xff;
        }
}
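
/*
 * A worked example of the scheduling info computed above (illustrative
 * only): for an HS endpoint with qh->interval == 2 micro-frames,
 * inc == (8 + 2 - 1) / 2 == 4, and the loop sets bits 0, 2, 4 and 6,
 * i.e. hc->schinfo == 0x55 - the channel is serviced in every other
 * micro-frame of the frame. FS/LS channels simply get 0xff.
 */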
#if 1
void dump_frame_list(dwc_otg_hcd_t * hcd)
{
        int i = 0;

        DWC_PRINTF("--FRAME LIST (hex) --\n");
        for (i = 0; i < MAX_FRLIST_EN_NUM; i++) {
                DWC_PRINTF("%x\t", hcd->frame_list[i]);
                /* Break the line after every 8th entry, not before it. */
                if ((i % 8) == 7)
                        DWC_PRINTF("\n");
        }
        DWC_PRINTF("\n----\n");
}
#endif
static void release_channel_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        dwc_hc_t *hc = qh->channel;

        if (dwc_qh_is_non_per(qh))
                hcd->non_periodic_channels--;
        else
                update_frame_list(hcd, qh, 0);

        /*
         * The condition is added to prevent a double cleanup attempt in
         * case of device disconnect. See channel cleanup in
         * dwc_otg_hcd_disconnect_cb().
         */
        if (hc->qh) {
                dwc_otg_hc_cleanup(hcd->core_if, hc);
                DWC_CIRCLEQ_INSERT_TAIL(&hcd->free_hc_list, hc, hc_list_entry);
                hc->qh = NULL;
        }

        qh->channel = NULL;
        qh->ntd = 0;

        if (qh->desc_list) {
                dwc_memset(qh->desc_list, 0x00,
                           sizeof(dwc_otg_host_dma_desc_t) * max_desc_num(qh));
        }
}
/**
 * Initializes a QH structure's Descriptor DMA related members.
 * Allocates memory for the descriptor list.
 * On the first periodic QH, allocates memory for the FrameList
 * and enables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to init.
 *
 * @return 0 if successful, negative error code otherwise.
 */
int dwc_otg_hcd_qh_init_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        int retval = 0;

        if (qh->do_split) {
                DWC_ERROR("SPLIT Transfers are not supported in Descriptor DMA.\n");
                return -1;
        }

        retval = desc_list_alloc(qh);

        if ((retval == 0)
            && (qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)) {
                if (!hcd->frame_list) {
                        retval = frame_list_alloc(hcd);
                        /* Enable periodic schedule on first periodic QH */
                        if (retval == 0)
                                per_sched_enable(hcd, MAX_FRLIST_EN_NUM);
                }
        }

        qh->ntd = 0;

        return retval;
}
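
/*
 * A minimal usage sketch (hypothetical caller, for illustration only;
 * the real call sites live in the QH creation and teardown paths):
 *
 *      if (dwc_otg_hcd_qh_init_ddma(hcd, qh) < 0) {
 *              // QH cannot be serviced in Descriptor DMA mode;
 *              // free the QH or fail the enqueue as the caller sees fit.
 *      }
 *      ...
 *      dwc_otg_hcd_qh_free_ddma(hcd, qh);      // undoes the init
 */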
/**
 * Frees descriptor list memory associated with the QH.
 * If the QH is periodic and the last one, frees FrameList memory
 * and disables periodic scheduling.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to free.
 */
void dwc_otg_hcd_qh_free_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        desc_list_free(qh);

        /*
         * The channel may still be assigned here. This has been seen on
         * Isoc URB dequeue: the channel was halted but no subsequent
         * ChHalted interrupt arrived to release it, so when we get here
         * from the endpoint disable routine the channel remains assigned.
         */
        if (qh->channel)
                release_channel_ddma(hcd, qh);

        if ((qh->ep_type == UE_ISOCHRONOUS || qh->ep_type == UE_INTERRUPT)
            && !hcd->periodic_channels && hcd->frame_list) {
                per_sched_disable(hcd);
                frame_list_free(hcd);
        }
}
static uint8_t frame_to_desc_idx(dwc_otg_qh_t * qh, uint16_t frame_idx)
{
        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Descriptor set (8 descriptors) index
                 * which is 8-aligned.
                 */
                return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
        } else {
                return (frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1));
        }
}
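
/*
 * Worked example, assuming MAX_DMA_DESC_NUM_HS_ISOC == 256 (i.e. 32
 * eight-descriptor sets; check dwc_otg_hcd.h for the configured value):
 * for an HS endpoint, frame_idx 37 maps to (37 & 31) * 8 == 40, the
 * first descriptor of set 5. Each HS frame owns one 8-descriptor set,
 * one descriptor per micro-frame.
 */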
/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static uint16_t calc_starting_frame(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
                                    uint8_t * skip_frames)
{
        uint16_t frame = 0;
        hcd->frame_number = dwc_otg_hcd_get_frame_number(hcd);

        /* sched_frame is always a frame number (not a uFrame), both in FS and HS! */

        /*
         * skip_frames is used to limit the number of activated descriptors,
         * to avoid the situation where the HC services the last activated
         * descriptor first.
         * Example for FS:
         * The current frame is 1, the scheduled frame is 3. Since the HC
         * always fetches the descriptor corresponding to curr_frame+1, the
         * descriptor corresponding to frame 2 will be fetched. If the number
         * of descriptors is max=64 (or greater), the list will be fully
         * programmed with Active descriptors and it is possible (though rare)
         * that the latest descriptor (considering rollback) corresponding to
         * frame 2 will be serviced first. The HS case is more probable
         * because, in fact, up to 11 uframes (16 in the code) may be skipped.
         */
        if (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) {
                /*
                 * Consider the uframe counter as well, to start the transfer
                 * asap. If half of the frame has elapsed, skip 2 frames,
                 * otherwise just 1 frame.
                 * The starting descriptor index must be 8-aligned, so
                 * if the current frame is nearly complete the next one
                 * is skipped as well.
                 */
                if (dwc_micro_frame_num(hcd->frame_number) >= 5) {
                        *skip_frames = 2 * 8;
                        frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
                } else {
                        *skip_frames = 1 * 8;
                        frame = dwc_frame_num_inc(hcd->frame_number, *skip_frames);
                }

                frame = dwc_full_frame_num(frame);
        } else {
                /*
                 * Two frames are skipped for FS - the current and the next.
                 * But for descriptor programming, 1 frame (descriptor) is
                 * enough, see the example above.
                 */
                *skip_frames = 1;
                frame = dwc_frame_num_inc(hcd->frame_number, 2);
        }

        return frame;
}
/*
 * Calculate the initial descriptor index for an isochronous transfer
 * based on the scheduled frame.
 */
static uint8_t recalc_initial_desc_idx(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        uint16_t frame = 0, fr_idx, fr_idx_tmp;
        uint8_t skip_frames = 0;
        /*
         * With the current ISOC processing algorithm the channel is released
         * when no more QTDs are in the list (qh->ntd == 0). Thus this
         * function is called only when qh->ntd == 0 and qh->channel == NULL.
         *
         * So the qh->channel != NULL branch is not used; it has simply not
         * been removed from the source file. It is required for another
         * possible approach: do not disable and release the channel when an
         * ISOC session completes, just move the QH to the inactive schedule
         * until a new QTD arrives. On a new QTD, the QH is moved back to the
         * 'ready' schedule, and the starting frame and therefore the starting
         * desc_index are recalculated. In this case the channel is released
         * only on ep_disable.
         */

        /* Calculate starting descriptor index. For INTERRUPT endpoints it is always 0. */
        if (qh->channel) {
                frame = calc_starting_frame(hcd, qh, &skip_frames);
                /*
                 * Calculate the initial descriptor index based on the current
                 * FrameList bitmap and the servicing period.
                 */
                fr_idx_tmp = frame_list_idx(frame);
                fr_idx =
                    (MAX_FRLIST_EN_NUM + frame_list_idx(qh->sched_frame) -
                     fr_idx_tmp)
                    % frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % MAX_FRLIST_EN_NUM;
        } else {
                qh->sched_frame = calc_starting_frame(hcd, qh, &skip_frames);
                fr_idx = frame_list_idx(qh->sched_frame);
        }

        qh->td_first = qh->td_last = frame_to_desc_idx(qh, fr_idx);

        return skip_frames;
}
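
/*
 * A worked example of the fr_idx math above (illustrative; assumes
 * MAX_FRLIST_EN_NUM == 64): with a servicing period of 8 FrameList
 * entries, a scheduled-frame index of 12 and a starting index
 * fr_idx_tmp == 35:
 *
 *      fr_idx = (64 + 12 - 35) % 8 = 41 % 8 = 1
 *      fr_idx = (1 + 35) % 64 = 36
 *
 * i.e. 36 is the first entry at or after 35 that is congruent to the
 * scheduled index (12) modulo the period (8), so the transfer lands on
 * the entries already marked in the FrameList bitmap.
 */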
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS 1023
#define MAX_ISOC_XFER_SIZE_HS 3072
#define DESCNUM_THRESHOLD 4
static void init_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh,
                               uint8_t skip_frames)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, inc, n_desc, ntd_max, max_xfer_size;

        idx = qh->td_last;
        inc = qh->interval;
        n_desc = 0;

        ntd_max = (max_desc_num(qh) + qh->interval - 1) / qh->interval;
        if (skip_frames && !qh->channel)
                ntd_max = ntd_max - skip_frames / qh->interval;

        max_xfer_size =
            (qh->dev_speed == DWC_OTG_EP_SPEED_HIGH) ? MAX_ISOC_XFER_SIZE_HS :
            MAX_ISOC_XFER_SIZE_FS;

        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {
                while ((qh->ntd < ntd_max)
                       && (qtd->isoc_frame_index_last < qtd->urb->packet_count)) {

                        dma_desc = &qh->desc_list[idx];
                        dwc_memset(dma_desc, 0x00, sizeof(dwc_otg_host_dma_desc_t));

                        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

                        if (frame_desc->length > max_xfer_size)
                                qh->n_bytes[idx] = max_xfer_size;
                        else
                                qh->n_bytes[idx] = frame_desc->length;
                        dma_desc->status.b_isoc.n_bytes = qh->n_bytes[idx];
                        dma_desc->status.b_isoc.a = 1;
                        dma_desc->status.b_isoc.sts = 0;

                        dma_desc->buf = qtd->urb->dma + frame_desc->offset;

                        qh->ntd++;

                        qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
                        /*
                         * Set IOC for each descriptor corresponding to the
                         * last frame of the URB.
                         */
                        if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
                                dma_desc->status.b_isoc.ioc = 1;
#endif
                        idx = desclist_idx_inc(idx, inc, qh->dev_speed);
                        n_desc++;
                }
                qtd->in_process = 1;
        }

        qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for the last descriptor if the descriptor list is full */
        if (qh->ntd == ntd_max) {
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status.b_isoc.ioc = 1;
        }
#else
        /*
         * Set the IOC bit for only one descriptor.
         * Always try to stay ahead of HW processing, i.e. on IOC generation
         * the driver activates the next descriptors, while the core continues
         * to process the descriptors following the one with IOC set.
         */
        if (n_desc > DESCNUM_THRESHOLD) {
                /*
                 * Move IOC "up". Required even if there is only one QTD
                 * in the list, because QTDs might continue to be queued,
                 * but during activation only one was queued. Actually more
                 * than one QTD might be in the list if this function is
                 * called from XferCompletion - QTDs were queued during HW
                 * processing of the previous descriptor chunk.
                 */
                idx = desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2), qh->dev_speed);
        } else {
                /*
                 * Set the IOC for the latest descriptor if either the number
                 * of descriptors is not greater than the threshold or no more
                 * new descriptors were activated.
                 */
                idx = desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
        }

        qh->desc_list[idx].status.b_isoc.ioc = 1;
#endif
}
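
/*
 * Illustrative numbers for ntd_max above, assuming
 * MAX_DMA_DESC_NUM_HS_ISOC == 256: an HS isochronous endpoint with
 * qh->interval == 4 micro-frames can have at most
 * (256 + 4 - 1) / 4 == 64 transfer descriptors in flight, spaced 4
 * slots apart in the descriptor list; skip_frames then reduces that
 * budget by the descriptors falling into the skipped micro-frames.
 */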
static void init_non_isoc_dma_desc(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        dwc_hc_t *hc;
        dwc_otg_host_dma_desc_t *dma_desc;
        dwc_otg_qtd_t *qtd;
        int num_packets, len, n_desc = 0;

        hc = qh->channel;

        /*
         * Start with hc->xfer_buff as initialized in assign_and_init_hc();
         * then, if an SG transfer consists of multiple URBs, this pointer
         * is re-assigned to the buffer of the currently processed QTD.
         * For a non-SG request there is always one QTD active.
         */
        DWC_CIRCLEQ_FOREACH(qtd, &qh->qtd_list, qtd_list_entry) {

                if (n_desc) {
                        /* SG request - more than 1 QTD */
                        hc->xfer_buff = (uint8_t *)qtd->urb->dma + qtd->urb->actual_length;
                        hc->xfer_len = qtd->urb->length - qtd->urb->actual_length;
                }

                qtd->n_desc = 0;

                do {
                        dma_desc = &qh->desc_list[n_desc];
                        len = hc->xfer_len;

                        if (len > MAX_DMA_DESC_SIZE)
                                len = MAX_DMA_DESC_SIZE - hc->max_packet + 1;

                        if (hc->ep_is_in) {
                                if (len > 0) {
                                        num_packets = (len + hc->max_packet - 1) / hc->max_packet;
                                } else {
                                        /* Need 1 packet for transfer length of 0. */
                                        num_packets = 1;
                                }
                                /* Always program an integral # of max packets for IN transfers. */
                                len = num_packets * hc->max_packet;
                        }

                        dma_desc->status.b.n_bytes = len;

                        qh->n_bytes[n_desc] = len;

                        if ((qh->ep_type == UE_CONTROL)
                            && (qtd->control_phase == DWC_OTG_CONTROL_SETUP))
                                dma_desc->status.b.sup = 1;     /* Setup Packet */

                        dma_desc->status.b.a = 1;       /* Active descriptor */
                        dma_desc->status.b.sts = 0;

                        dma_desc->buf =
                            ((unsigned long)hc->xfer_buff & 0xffffffff);

                        /*
                         * Last (or only) descriptor of an IN transfer
                         * with an actual size less than MaxPacket.
                         */
                        if (len > hc->xfer_len) {
                                hc->xfer_len = 0;
                        } else {
                                hc->xfer_buff += len;
                                hc->xfer_len -= len;
                        }

                        qtd->n_desc++;
                        n_desc++;
                } while ((hc->xfer_len > 0) && (n_desc != MAX_DMA_DESC_NUM_GENERIC));

                qtd->in_process = 1;

                if (qh->ep_type == UE_CONTROL)
                        break;

                if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
                        break;
        }

        if (n_desc) {
                /* Request a Transfer Complete interrupt for the last descriptor */
                qh->desc_list[n_desc - 1].status.b.ioc = 1;
                /* End of List indicator */
                qh->desc_list[n_desc - 1].status.b.eol = 1;

                hc->ntd = n_desc;
        }
}
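
/*
 * Worked example of the IN-length rounding above: with max_packet == 512
 * and hc->xfer_len == 1000, num_packets = (1000 + 511) / 512 = 2, so the
 * descriptor is programmed for len = 1024 bytes. If the device sends a
 * short packet, the leftover is written back by the core into the
 * descriptor's n_bytes field and subtracted as "remain" in
 * update_non_isoc_urb_state_ddma().
 */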
/**
 * For Control and Bulk endpoints, initializes the descriptor list
 * and starts the transfer.
 *
 * For Interrupt and Isochronous endpoints, initializes the descriptor list
 * then updates the FrameList, marking the appropriate entries as active.
 * In the Isochronous case, the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. The transfer is then started by enabling the channel.
 * For an Isochronous endpoint the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session
 * is done.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param qh The QH to start the transfer on.
 */
void dwc_otg_hcd_start_xfer_ddma(dwc_otg_hcd_t * hcd, dwc_otg_qh_t * qh)
{
        /* Channel is already assigned */
        dwc_hc_t *hc = qh->channel;
        uint8_t skip_frames = 0;

        switch (hc->ep_type) {
        case DWC_OTG_EP_TYPE_CONTROL:
        case DWC_OTG_EP_TYPE_BULK:
                init_non_isoc_dma_desc(hcd, qh);

                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_INTR:
                init_non_isoc_dma_desc(hcd, qh);

                update_frame_list(hcd, qh, 1);

                dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                break;
        case DWC_OTG_EP_TYPE_ISOC:
                if (!qh->ntd)
                        skip_frames = recalc_initial_desc_idx(hcd, qh);

                init_isoc_dma_desc(hcd, qh, skip_frames);

                if (!hc->xfer_started) {
                        update_frame_list(hcd, qh, 1);

                        /*
                         * Always set to max, instead of the actual size.
                         * Otherwise ntd would change as the channel is
                         * enabled. Not recommended.
                         */
                        hc->ntd = max_desc_num(qh);

                        /* Enable the channel only once for ISOC */
                        dwc_otg_hc_start_transfer_ddma(hcd->core_if, hc);
                }
                break;
        default:
                break;
        }
}
static void complete_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
                                    dwc_hc_t * hc,
                                    dwc_otg_hc_regs_t * hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        struct dwc_otg_hcd_iso_packet_desc *frame_desc;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint16_t idx, remain;
        uint8_t urb_compl;

        qh = hc->qh;
        idx = qh->td_first;

        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry)
                    qtd->in_process = 0;
                return;
        } else if ((halt_status == DWC_OTG_HC_XFER_AHB_ERR) ||
                   (halt_status == DWC_OTG_HC_XFER_BABBLE_ERR)) {
                /*
                 * The channel is halted in these error cases, which are
                 * considered serious issues.
                 * Complete all URBs, marking all frames as failed,
                 * irrespective of whether some of the descriptors (frames)
                 * succeeded or not.
                 * Pass the error code to the completion routine as well, to
                 * update urb->status; some class drivers might use it to stop
                 * queuing transfer requests.
                 */
                int err = (halt_status == DWC_OTG_HC_XFER_AHB_ERR)
                    ? (-DWC_E_IO)
                    : (-DWC_E_OVERFLOW);

                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
                        for (idx = 0; idx < qtd->urb->packet_count; idx++) {
                                frame_desc = &qtd->urb->iso_descs[idx];
                                frame_desc->status = err;
                        }
                        hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, err);
                        dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {

                if (!qtd->in_process)
                        break;

                urb_compl = 0;

                do {

                        dma_desc = &qh->desc_list[idx];

                        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
                        remain = hc->ep_is_in ? dma_desc->status.b_isoc.n_bytes : 0;

                        if (dma_desc->status.b_isoc.sts == DMA_DESC_STS_PKTERR) {
                                /*
                                 * XactError, or inability to complete all the
                                 * transactions in the scheduled micro-frame/frame;
                                 * both are indicated by DMA_DESC_STS_PKTERR.
                                 */
                                qtd->urb->error_count++;
                                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                                frame_desc->status = -DWC_E_PROTOCOL;
                        } else {
                                /* Success */

                                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                                frame_desc->status = 0;
                        }

                        if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
                                /*
                                 * urb->status is not used for isoc transfers here.
                                 * The individual frame_desc statuses are used instead.
                                 */

                                hcd->fops->complete(hcd, qtd->urb->priv, qtd->urb, 0);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                /*
                                 * This check is necessary because urb_dequeue
                                 * can be called from the urb complete callback
                                 * (the sound driver is an example). All pending
                                 * URBs are dequeued there, so no need for
                                 * further processing.
                                 */
                                if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                                        return;
                                }

                                urb_compl = 1;
                        }

                        qh->ntd--;

                        /* Stop if an IOC-requested descriptor is reached */
                        if (dma_desc->status.b_isoc.ioc) {
                                idx = desclist_idx_inc(idx, qh->interval, hc->speed);
                                goto stop_scan;
                        }

                        idx = desclist_idx_inc(idx, qh->interval, hc->speed);

                        if (urb_compl)
                                break;
                } while (idx != qh->td_first);
        }
stop_scan:
        qh->td_first = idx;
}
uint8_t update_non_isoc_urb_state_ddma(dwc_otg_hcd_t * hcd,
                                       dwc_hc_t * hc,
                                       dwc_otg_qtd_t * qtd,
                                       dwc_otg_host_dma_desc_t * dma_desc,
                                       dwc_otg_halt_status_e halt_status,
                                       uint32_t n_bytes, uint8_t * xfer_done)
{
        uint16_t remain = hc->ep_is_in ? dma_desc->status.b.n_bytes : 0;
        dwc_otg_hcd_urb_t *urb = qtd->urb;

        if (halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
                urb->status = -DWC_E_IO;
                return 1;
        }

        if (dma_desc->status.b.sts == DMA_DESC_STS_PKTERR) {
                switch (halt_status) {
                case DWC_OTG_HC_XFER_STALL:
                        urb->status = -DWC_E_PIPE;
                        break;
                case DWC_OTG_HC_XFER_BABBLE_ERR:
                        urb->status = -DWC_E_OVERFLOW;
                        break;
                case DWC_OTG_HC_XFER_XACT_ERR:
                        urb->status = -DWC_E_PROTOCOL;
                        break;
                default:
                        DWC_ERROR("%s: Unhandled descriptor error status (%d)\n",
                                  __func__, halt_status);
                        break;
                }
                return 1;
        }

        if (dma_desc->status.b.a == 1) {
                DWC_DEBUGPL(DBG_HCDV,
                            "Active descriptor encountered on channel %d\n",
                            hc->hc_num);
                return 0;
        }

        if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL) {
                if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
                        urb->actual_length += n_bytes - remain;
                        if (remain || urb->actual_length == urb->length) {
                                /*
                                 * For the Control Data stage do not set
                                 * urb->status = 0, to prevent the URB callback.
                                 * Set it when the Status phase is done. See below.
                                 */
                                *xfer_done = 1;
                        }

                } else if (qtd->control_phase == DWC_OTG_CONTROL_STATUS) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
                /* No handling for the SETUP stage */
        } else {
                /* BULK and INTR */
                urb->actual_length += n_bytes - remain;
                if (remain || urb->actual_length == urb->length) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
        }

        return 0;
}
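
/*
 * Worked example of the "remain" accounting above: for a bulk IN
 * descriptor programmed with n_bytes == 512, a device that delivers
 * only 100 bytes leaves dma_desc->status.b.n_bytes == 412 on
 * completion, so actual_length += 512 - 412 = 100, and the non-zero
 * remainder (a short packet) marks the transfer done even though
 * actual_length < urb->length.
 */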
static void complete_non_isoc_xfer_ddma(dwc_otg_hcd_t * hcd,
                                        dwc_hc_t * hc,
                                        dwc_otg_hc_regs_t * hc_regs,
                                        dwc_otg_halt_status_e halt_status)
{
        dwc_otg_hcd_urb_t *urb = NULL;
        dwc_otg_qtd_t *qtd, *qtd_tmp;
        dwc_otg_qh_t *qh;
        dwc_otg_host_dma_desc_t *dma_desc;
        uint32_t n_bytes, n_desc, i;
        uint8_t failed = 0, xfer_done;

        n_desc = 0;

        qh = hc->qh;

        if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE) {
                DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &hc->qh->qtd_list, qtd_list_entry) {
                        qtd->in_process = 0;
                }
                return;
        }

        DWC_CIRCLEQ_FOREACH_SAFE(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {

                urb = qtd->urb;

                n_bytes = 0;
                xfer_done = 0;

                for (i = 0; i < qtd->n_desc; i++) {
                        dma_desc = &qh->desc_list[n_desc];

                        n_bytes = qh->n_bytes[n_desc];

                        failed =
                            update_non_isoc_urb_state_ddma(hcd, hc, qtd,
                                                           dma_desc,
                                                           halt_status, n_bytes,
                                                           &xfer_done);

                        if (failed
                            || (xfer_done
                                && (urb->status != -DWC_E_IN_PROGRESS))) {

                                hcd->fops->complete(hcd, urb->priv, urb,
                                                    urb->status);
                                dwc_otg_hcd_qtd_remove_and_free(hcd, qtd, qh);

                                if (failed)
                                        goto stop_scan;
                        } else if (qh->ep_type == UE_CONTROL) {
                                if (qtd->control_phase == DWC_OTG_CONTROL_SETUP) {
                                        if (urb->length > 0) {
                                                qtd->control_phase = DWC_OTG_CONTROL_DATA;
                                        } else {
                                                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
                                        }
                                        DWC_DEBUGPL(DBG_HCDV, "  Control setup transaction done\n");
                                } else if (qtd->control_phase == DWC_OTG_CONTROL_DATA) {
                                        if (xfer_done) {
                                                qtd->control_phase = DWC_OTG_CONTROL_STATUS;
                                                DWC_DEBUGPL(DBG_HCDV, "  Control data transfer done\n");
                                        } else if (i + 1 == qtd->n_desc) {
                                                /*
                                                 * Last descriptor for Control
                                                 * data stage which is not
                                                 * completed yet.
                                                 */
                                                dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
                                        }
                                }
                        }

                        n_desc++;
                }

        }

stop_scan:

        if (qh->ep_type != UE_CONTROL) {
                /*
                 * Resetting the data toggle for bulk and interrupt endpoints
                 * in case of stall. See handle_hc_stall_intr().
                 */
                if (halt_status == DWC_OTG_HC_XFER_STALL)
                        qh->data_toggle = DWC_OTG_HC_PID_DATA0;
                else
                        dwc_otg_hcd_save_data_toggle(hc, hc_regs, qtd);
        }

        if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
                hcint_data_t hcint;
                hcint.d32 = DWC_READ_REG32(&hc_regs->hcint);
                if (hcint.b.nyet) {
                        /*
                         * Got a NYET on the last transaction of the transfer.
                         * It means that the endpoint should be in the PING
                         * state at the beginning of the next transfer.
                         */
                        qh->ping_state = 1;
                        clear_hc_int(hc_regs, nyet);
                }
        }
}
/**
 * This function is called from interrupt handlers.
 * It scans the descriptor list, updates the URB's status and
 * calls the completion routine for the URB if it's done.
 * It then releases the channel to be used by other transfers.
 * In the case of an Isochronous endpoint the channel is not halted until
 * the end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 *
 * Calls the transaction selection routines to activate pending transfers.
 *
 * @param hcd The HCD state structure for the DWC OTG controller.
 * @param hc The host channel the transfer is completed on.
 * @param hc_regs The host channel registers.
 * @param halt_status The reason the channel is being halted,
 *                    or just XferComplete for isochronous transfers.
 */
void dwc_otg_hcd_complete_xfer_ddma(dwc_otg_hcd_t * hcd,
                                    dwc_hc_t * hc,
                                    dwc_otg_hc_regs_t * hc_regs,
                                    dwc_otg_halt_status_e halt_status)
{
        uint8_t continue_isoc_xfer = 0;
        dwc_otg_transaction_type_e tr_type;
        dwc_otg_qh_t *qh = hc->qh;

        if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {

                complete_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);

                /* Release the channel if halted or session completed */
                if (halt_status != DWC_OTG_HC_XFER_COMPLETE ||
                    DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {

                        /* Halt the channel if session completed */
                        if (halt_status == DWC_OTG_HC_XFER_COMPLETE) {
                                dwc_otg_hc_halt(hcd->core_if, hc, halt_status);
                        }
                        release_channel_ddma(hcd, qh);
                        dwc_otg_hcd_qh_remove(hcd, qh);
                } else {
                        /* Keep in the assigned schedule to continue the transfer */
                        DWC_LIST_MOVE_HEAD(&hcd->periodic_sched_assigned,
                                           &qh->qh_list_entry);
                        continue_isoc_xfer = 1;
                }
                /** @todo Consider the case when the period exceeds the
                 *  FrameList size. The Frame Rollover interrupt should be
                 *  used.
                 */
        } else {
                /* Scan the descriptor list to complete the URB(s), then release the channel */
                complete_non_isoc_xfer_ddma(hcd, hc, hc_regs, halt_status);
                release_channel_ddma(hcd, qh);
                dwc_otg_hcd_qh_remove(hcd, qh);

                if (!DWC_CIRCLEQ_EMPTY(&qh->qtd_list)) {
                        /* Add back to the inactive non-periodic schedule on normal completion */
                        dwc_otg_hcd_qh_add(hcd, qh);
                }
        }

        tr_type = dwc_otg_hcd_select_transactions(hcd);
        if (tr_type != DWC_OTG_TRANSACTION_NONE || continue_isoc_xfer) {
                if (continue_isoc_xfer) {
                        if (tr_type == DWC_OTG_TRANSACTION_NONE) {
                                tr_type = DWC_OTG_TRANSACTION_PERIODIC;
                        } else if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC) {
                                tr_type = DWC_OTG_TRANSACTION_ALL;
                        }
                }
                dwc_otg_hcd_queue_transactions(hcd, tr_type);
        }
}
#endif /* DWC_DEVICE_ONLY */