dwc_otg_pcd.c 69 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725
  1. /* ==========================================================================
  2. * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
  3. * $Revision: #99 $
  4. * $Date: 2011/10/24 $
  5. * $Change: 1871160 $
  6. *
  7. * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
  8. * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
  9. * otherwise expressly agreed to in writing between Synopsys and you.
  10. *
  11. * The Software IS NOT an item of Licensed Software or Licensed Product under
  12. * any End User Software License Agreement or Agreement for Licensed Product
  13. * with Synopsys or any supplement thereto. You are permitted to use and
  14. * redistribute this Software in source and binary forms, with or without
  15. * modification, provided that redistributions of source code must retain this
  16. * notice. You may not view, use, disclose, copy or distribute this file or
  17. * any information contained herein except pursuant to this license grant from
  18. * Synopsys. If you do not agree with this notice, including the disclaimer
  19. * below, then you are not authorized to use the Software.
  20. *
  21. * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
  22. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  23. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  24. * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
  25. * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  26. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  27. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  28. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  29. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  30. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
  31. * DAMAGE.
  32. * ========================================================================== */
  33. #ifndef DWC_HOST_ONLY
  34. /** @file
  35. * This file implements PCD Core. All code in this file is portable and doesn't
  36. * use any OS specific functions.
  37. * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
  38. * header file, which can be used to implement OS specific PCD interface.
  39. *
  40. * An important function of the PCD is managing interrupts generated
  41. * by the DWC_otg controller. The implementation of the DWC_otg device
  42. * mode interrupt service routines is in dwc_otg_pcd_intr.c.
  43. *
  44. * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
  45. * @todo Does it work when the request size is greater than DEPTSIZ
  46. * transfer size
  47. *
  48. */
  49. #include "dwc_otg_pcd.h"
  50. #ifdef DWC_UTE_CFI
  51. #include "dwc_otg_cfi.h"
  52. extern int init_cfi(cfiobject_t * cfiobj);
  53. #endif
/* Human-readable labels for the charger-detection result returned by
 * dwc_otg_charger_detect(); indexed by the USB_BC_MODE_* value. */
static const char * bc_name[]={
	"UNKNOWN (Disconnect)",
	"SDP (PC)",
	"DCP (Charger)",
	"CDP (PC with Charger)",
};
/* Battery-charging detection timings, in milliseconds.
 * NOTE(review): values appear to track the USB Battery Charging spec
 * (DCD settle, VDP_SRC/VDM_SRC windows) -- confirm against databook. */
#define T_DCD_TIMEOUT 10
#define T_VDPSRC_ON 40
#define T_VDMSRC_EN (20 + 5)
#define T_VDMSRC_DIS (20 + 5)
#define T_VDMSRC_ON 40
/**
 * Detect the type of the attached charger/port by driving the
 * controller's ADP/BC (battery charging) register.
 *
 * Sequence (NOTE(review): looks like USB-BC primary/secondary
 * detection -- confirm against the controller databook):
 *  1. If a device session is valid, wait T_DCD_TIMEOUT, enable VDP_SRC
 *     (chrgsel=0) and poll chg_det for up to T_VDMSRC_EN ms.
 *  2. chg_det low  -> standard downstream port (SDP).
 *  3. chg_det high -> charging port: drop VDP_SRC, wait for chg_det to
 *     clear, then enable VDM_SRC (chrgsel=1); chg_det high => DCP,
 *     low => CDP.
 * The detection sources/comparators are disabled again before return.
 *
 * @param _core_if Programming view of the DWC_otg controller.
 * @return USB_BC_MODE_DISCONNECT, _SDP, _DCP or _CDP.
 */
int dwc_otg_charger_detect(dwc_otg_core_if_t * _core_if)
{
	usb_peri_reg_t *peri;
	usb_adp_bc_data_t adp_bc;
	int bc_mode = USB_BC_MODE_DISCONNECT;	/* default: no session */
	int timeout_det;
	peri = _core_if->usb_peri_reg;
	adp_bc.d32 = DWC_READ_REG32(&peri->adp_bc);
	if(adp_bc.b.device_sess_vld){
		/* Let Data Contact Detect settle before primary detection. */
		DWC_MDELAY(T_DCD_TIMEOUT);
		/* Turn on VDPSRC */
		adp_bc.b.chrgsel = 0;
		adp_bc.b.vdatdetenb = 1;
		adp_bc.b.vdatsrcenb = 1;
		adp_bc.b.dcd_enable = 0;
		DWC_WRITE_REG32(&peri->adp_bc,adp_bc.d32);
		/* SDP and CDP/DCP distinguish */
		timeout_det = T_VDMSRC_EN;
		while(timeout_det--){
			adp_bc.d32 = DWC_READ_REG32(&peri->adp_bc);
			if(adp_bc.b.chg_det)
				break;
			DWC_MDELAY(1);
		};
		if(adp_bc.b.chg_det){
			/* Charging port detected: run secondary detection. */
			/* Turn off VDPSRC */
			adp_bc.d32 = DWC_READ_REG32(&peri->adp_bc);
			adp_bc.b.vdatdetenb = 0;
			adp_bc.b.vdatsrcenb = 0;
			DWC_WRITE_REG32(&peri->adp_bc,adp_bc.d32);
			/* Wait VDMSRC_DIS */
			timeout_det = T_VDMSRC_DIS;
			while(timeout_det--){
				adp_bc.d32 = DWC_READ_REG32(&peri->adp_bc);
				if(!adp_bc.b.chg_det)
					break;
				DWC_MDELAY(1);
			};
			/* NOTE(review): timeout_det is -1 after a full
			 * countdown, but can also reach 0 on a last-moment
			 * break -- this may warn spuriously. */
			if(timeout_det <= 0)
				DWC_WARN("Time out for VDMSRC_DIS!");
			/* Turn on VDMSRC */
			adp_bc.d32 = DWC_READ_REG32(&peri->adp_bc);
			adp_bc.b.chrgsel = 1;
			adp_bc.b.vdatdetenb = 1;
			adp_bc.b.vdatsrcenb = 1;
			DWC_WRITE_REG32(&peri->adp_bc,adp_bc.d32);
			DWC_MDELAY(T_VDMSRC_ON);
			adp_bc.d32 = DWC_READ_REG32(&peri->adp_bc);
			if(adp_bc.b.chg_det)
				bc_mode = USB_BC_MODE_DCP;
			else
				bc_mode = USB_BC_MODE_CDP;
		}
		else{
			bc_mode = USB_BC_MODE_SDP;
		}
		/* Detection finished: switch everything off again. */
		adp_bc.d32 = DWC_READ_REG32(&peri->adp_bc);
		adp_bc.b.vdatdetenb = 0;
		adp_bc.b.vdatsrcenb = 0;
		adp_bc.b.dcd_enable = 0;
		DWC_WRITE_REG32(&peri->adp_bc,adp_bc.d32);
	}
	DWC_PRINTF("detected battery charger type: %s\n",bc_name[bc_mode]);
	return bc_mode;
}
  130. /**
  131. * Choose endpoint from ep arrays using usb_ep structure.
  132. */
  133. static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t * pcd, void *handle)
  134. {
  135. int i;
  136. if (pcd->ep0.priv == handle) {
  137. return &pcd->ep0;
  138. }
  139. for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
  140. if (pcd->in_ep[i].priv == handle)
  141. return &pcd->in_ep[i];
  142. if (pcd->out_ep[i].priv == handle)
  143. return &pcd->out_ep[i];
  144. }
  145. return NULL;
  146. }
  147. /**
  148. * This function completes a request. It call's the request call back.
  149. */
  150. void dwc_otg_request_done(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req,
  151. int32_t status)
  152. {
  153. unsigned stopped = ep->stopped;
  154. DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
  155. DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
  156. /* don't modify queue heads during completion callback */
  157. ep->stopped = 1;
  158. /* spin_unlock/spin_lock now done in fops->complete() */
  159. ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
  160. req->actual);
  161. if (ep->pcd->request_pending > 0) {
  162. --ep->pcd->request_pending;
  163. }
  164. ep->stopped = stopped;
  165. DWC_FREE(req);
  166. }
  167. /**
  168. * This function terminates all the requsts in the EP request queue.
  169. */
  170. void dwc_otg_request_nuke(dwc_otg_pcd_ep_t * ep)
  171. {
  172. dwc_otg_pcd_request_t *req;
  173. ep->stopped = 1;
  174. /* called with irqs blocked?? */
  175. while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
  176. req = DWC_CIRCLEQ_FIRST(&ep->queue);
  177. dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
  178. }
  179. }
/**
 * Register the function-driver operations table with the PCD.
 *
 * @param pcd  The PCD instance.
 * @param fops Function-driver callbacks (complete, suspend, resume, ...).
 */
void dwc_otg_pcd_start(dwc_otg_pcd_t * pcd,
		       const struct dwc_otg_pcd_function_ops *fops)
{
	pcd->fops = fops;
}
  185. /**
  186. * PCD Callback function for initializing the PCD when switching to
  187. * device mode.
  188. *
  189. * @param p void pointer to the <code>dwc_otg_pcd_t</code>
  190. */
  191. static int32_t dwc_otg_pcd_start_cb(void *p)
  192. {
  193. dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
  194. dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
  195. /*
  196. * Initialized the Core for Device mode.
  197. */
  198. if (dwc_otg_is_device_mode(core_if)) {
  199. dwc_otg_core_dev_init(core_if);
  200. /* Set core_if's lock pointer to the pcd->lock */
  201. core_if->lock = pcd->lock;
  202. }
  203. return 1;
  204. }
/** CFI-specific buffer allocation function for EP */
#ifdef DWC_UTE_CFI
/**
 * Allocate an endpoint buffer through the CFI object's allocator.
 *
 * @param pcd    The PCD instance.
 * @param pep    Gadget-layer endpoint handle (resolved via
 *               get_ep_from_handle()).
 * @param addr   Out: DMA address of the buffer.
 * @param buflen Requested buffer length in bytes.
 * @param flags  Allocation flags passed through to ep_alloc_buf.
 *
 * NOTE(review): on a bad endpoint this returns -DWC_E_INVALID cast to
 * uint8_t*, not NULL -- callers must check for that sentinel rather
 * than a plain NULL. Returning NULL would be cleaner; verify callers
 * before changing.
 */
uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
			      size_t buflen, int flags)
{
	dwc_otg_pcd_ep_t *ep;
	ep = get_ep_from_handle(pcd, pep);
	if (!ep) {
		DWC_WARN("bad ep\n");
		return -DWC_E_INVALID;
	}
	return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
					  flags);
}
#else
/* CFI support compiled out: prototype only. */
uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
			      size_t buflen, int flags);
#endif
  223. /**
  224. * PCD Callback function for notifying the PCD when resuming from
  225. * suspend.
  226. *
  227. * @param p void pointer to the <code>dwc_otg_pcd_t</code>
  228. */
  229. static int32_t dwc_otg_pcd_resume_cb(void *p)
  230. {
  231. dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
  232. if (pcd->fops->resume) {
  233. pcd->fops->resume(pcd);
  234. }
  235. /* Stop the SRP timeout timer. */
  236. if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
  237. || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
  238. if (GET_CORE_IF(pcd)->srp_timer_started) {
  239. GET_CORE_IF(pcd)->srp_timer_started = 0;
  240. DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
  241. }
  242. }
  243. return 1;
  244. }
  245. /**
  246. * PCD Callback function for notifying the PCD device is suspended.
  247. *
  248. * @param p void pointer to the <code>dwc_otg_pcd_t</code>
  249. */
  250. static int32_t dwc_otg_pcd_suspend_cb(void *p)
  251. {
  252. dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
  253. if (pcd->fops->suspend) {
  254. /* temporarily remove these unlock/lock */
  255. //DWC_SPINUNLOCK(pcd->lock);
  256. pcd->fops->suspend(pcd);
  257. //DWC_SPINLOCK(pcd->lock);
  258. }
  259. return 1;
  260. }
  261. /**
  262. * PCD Callback function for stopping the PCD when switching to Host
  263. * mode.
  264. *
  265. * @param p void pointer to the <code>dwc_otg_pcd_t</code>
  266. */
  267. static int32_t dwc_otg_pcd_stop_cb(void *p)
  268. {
  269. dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
  270. extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
  271. dwc_otg_pcd_stop(pcd);
  272. return 1;
  273. }
/**
 * PCD Callback structure for handling mode switching.
 * Registered with the CIL so the core layer can notify the PCD of
 * device-mode start/stop and suspend/resume events.
 */
static dwc_otg_cil_callbacks_t pcd_callbacks = {
	.start = dwc_otg_pcd_start_cb,
	.stop = dwc_otg_pcd_stop_cb,
	.suspend = dwc_otg_pcd_suspend_cb,
	.resume_wakeup = dwc_otg_pcd_resume_cb,
};
/**
 * This function allocates a DMA Descriptor chain for the Endpoint
 * buffer to be used for a transfer to/from the specified endpoint.
 *
 * @param dma_desc_addr Out: bus/DMA address of the allocated chain.
 * @param count Number of descriptors to allocate.
 * @return CPU pointer to the chain, or NULL on failure
 *         (presumably -- per DWC_DMA_ALLOC_ATOMIC; callers check for
 *         a false value).
 */
dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t * dma_desc_addr,
						    uint32_t count)
{
	return DWC_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
				    dma_desc_addr);
}
/**
 * This function frees a DMA Descriptor chain that was allocated by
 * dwc_otg_ep_alloc_desc_chain().
 *
 * @param desc_addr     CPU pointer to the chain.
 * @param dma_desc_addr Bus/DMA address of the chain.
 * @param count         Number of descriptors in the chain.
 */
void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t * desc_addr,
				uint32_t dma_desc_addr, uint32_t count)
{
	DWC_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
		     dma_desc_addr);
}
  302. #ifdef DWC_EN_ISOC
  303. /**
  304. * This function initializes a descriptor chain for Isochronous transfer
  305. *
  306. * @param core_if Programming view of DWC_otg controller.
  307. * @param dwc_ep The EP to start the transfer on.
  308. *
  309. */
  310. void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t * core_if,
  311. dwc_ep_t * dwc_ep)
  312. {
  313. dsts_data_t dsts = {.d32 = 0 };
  314. depctl_data_t depctl = {.d32 = 0 };
  315. volatile uint32_t *addr;
  316. int i, j;
  317. uint32_t len;
  318. if (dwc_ep->is_in)
  319. dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
  320. else
  321. dwc_ep->desc_cnt =
  322. dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
  323. dwc_ep->bInterval;
  324. /** Allocate descriptors for double buffering */
  325. dwc_ep->iso_desc_addr =
  326. dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
  327. dwc_ep->desc_cnt * 2);
  328. if (dwc_ep->desc_addr) {
  329. DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
  330. return;
  331. }
  332. dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
  333. /** ISO OUT EP */
  334. if (dwc_ep->is_in == 0) {
  335. dev_dma_desc_sts_t sts = {.d32 = 0 };
  336. dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
  337. dma_addr_t dma_ad;
  338. uint32_t data_per_desc;
  339. dwc_otg_dev_out_ep_regs_t *out_regs =
  340. core_if->dev_if->out_ep_regs[dwc_ep->num];
  341. int offset;
  342. addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
  343. dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
  344. /** Buffer 0 descriptors setup */
  345. dma_ad = dwc_ep->dma_addr0;
  346. sts.b_iso_out.bs = BS_HOST_READY;
  347. sts.b_iso_out.rxsts = 0;
  348. sts.b_iso_out.l = 0;
  349. sts.b_iso_out.sp = 0;
  350. sts.b_iso_out.ioc = 0;
  351. sts.b_iso_out.pid = 0;
  352. sts.b_iso_out.framenum = 0;
  353. offset = 0;
  354. for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
  355. i += dwc_ep->pkt_per_frm) {
  356. for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
  357. uint32_t len = (j + 1) * dwc_ep->maxpacket;
  358. if (len > dwc_ep->data_per_frame)
  359. data_per_desc =
  360. dwc_ep->data_per_frame -
  361. j * dwc_ep->maxpacket;
  362. else
  363. data_per_desc = dwc_ep->maxpacket;
  364. len = data_per_desc % 4;
  365. if (len)
  366. data_per_desc += 4 - len;
  367. sts.b_iso_out.rxbytes = data_per_desc;
  368. dma_desc->buf = dma_ad;
  369. dma_desc->status.d32 = sts.d32;
  370. offset += data_per_desc;
  371. dma_desc++;
  372. dma_ad += data_per_desc;
  373. }
  374. }
  375. for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
  376. uint32_t len = (j + 1) * dwc_ep->maxpacket;
  377. if (len > dwc_ep->data_per_frame)
  378. data_per_desc =
  379. dwc_ep->data_per_frame -
  380. j * dwc_ep->maxpacket;
  381. else
  382. data_per_desc = dwc_ep->maxpacket;
  383. len = data_per_desc % 4;
  384. if (len)
  385. data_per_desc += 4 - len;
  386. sts.b_iso_out.rxbytes = data_per_desc;
  387. dma_desc->buf = dma_ad;
  388. dma_desc->status.d32 = sts.d32;
  389. offset += data_per_desc;
  390. dma_desc++;
  391. dma_ad += data_per_desc;
  392. }
  393. sts.b_iso_out.ioc = 1;
  394. len = (j + 1) * dwc_ep->maxpacket;
  395. if (len > dwc_ep->data_per_frame)
  396. data_per_desc =
  397. dwc_ep->data_per_frame - j * dwc_ep->maxpacket;
  398. else
  399. data_per_desc = dwc_ep->maxpacket;
  400. len = data_per_desc % 4;
  401. if (len)
  402. data_per_desc += 4 - len;
  403. sts.b_iso_out.rxbytes = data_per_desc;
  404. dma_desc->buf = dma_ad;
  405. dma_desc->status.d32 = sts.d32;
  406. dma_desc++;
  407. /** Buffer 1 descriptors setup */
  408. sts.b_iso_out.ioc = 0;
  409. dma_ad = dwc_ep->dma_addr1;
  410. offset = 0;
  411. for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
  412. i += dwc_ep->pkt_per_frm) {
  413. for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
  414. uint32_t len = (j + 1) * dwc_ep->maxpacket;
  415. if (len > dwc_ep->data_per_frame)
  416. data_per_desc =
  417. dwc_ep->data_per_frame -
  418. j * dwc_ep->maxpacket;
  419. else
  420. data_per_desc = dwc_ep->maxpacket;
  421. len = data_per_desc % 4;
  422. if (len)
  423. data_per_desc += 4 - len;
  424. data_per_desc =
  425. sts.b_iso_out.rxbytes = data_per_desc;
  426. dma_desc->buf = dma_ad;
  427. dma_desc->status.d32 = sts.d32;
  428. offset += data_per_desc;
  429. dma_desc++;
  430. dma_ad += data_per_desc;
  431. }
  432. }
  433. for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
  434. data_per_desc =
  435. ((j + 1) * dwc_ep->maxpacket >
  436. dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
  437. j * dwc_ep->maxpacket : dwc_ep->maxpacket;
  438. data_per_desc +=
  439. (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
  440. sts.b_iso_out.rxbytes = data_per_desc;
  441. dma_desc->buf = dma_ad;
  442. dma_desc->status.d32 = sts.d32;
  443. offset += data_per_desc;
  444. dma_desc++;
  445. dma_ad += data_per_desc;
  446. }
  447. sts.b_iso_out.ioc = 1;
  448. sts.b_iso_out.l = 1;
  449. data_per_desc =
  450. ((j + 1) * dwc_ep->maxpacket >
  451. dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
  452. j * dwc_ep->maxpacket : dwc_ep->maxpacket;
  453. data_per_desc +=
  454. (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
  455. sts.b_iso_out.rxbytes = data_per_desc;
  456. dma_desc->buf = dma_ad;
  457. dma_desc->status.d32 = sts.d32;
  458. dwc_ep->next_frame = 0;
  459. /** Write dma_ad into DOEPDMA register */
  460. DWC_WRITE_REG32(&(out_regs->doepdma),
  461. (uint32_t) dwc_ep->iso_dma_desc_addr);
  462. }
  463. /** ISO IN EP */
  464. else {
  465. dev_dma_desc_sts_t sts = {.d32 = 0 };
  466. dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
  467. dma_addr_t dma_ad;
  468. dwc_otg_dev_in_ep_regs_t *in_regs =
  469. core_if->dev_if->in_ep_regs[dwc_ep->num];
  470. unsigned int frmnumber;
  471. fifosize_data_t txfifosize, rxfifosize;
  472. txfifosize.d32 =
  473. DWC_READ_REG32(&core_if->dev_if->
  474. in_ep_regs[dwc_ep->num]->dtxfsts);
  475. rxfifosize.d32 =
  476. DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
  477. addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
  478. dma_ad = dwc_ep->dma_addr0;
  479. dsts.d32 =
  480. DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
  481. sts.b_iso_in.bs = BS_HOST_READY;
  482. sts.b_iso_in.txsts = 0;
  483. sts.b_iso_in.sp =
  484. (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
  485. sts.b_iso_in.ioc = 0;
  486. sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
  487. frmnumber = dwc_ep->next_frame;
  488. sts.b_iso_in.framenum = frmnumber;
  489. sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
  490. sts.b_iso_in.l = 0;
  491. /** Buffer 0 descriptors setup */
  492. for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
  493. dma_desc->buf = dma_ad;
  494. dma_desc->status.d32 = sts.d32;
  495. dma_desc++;
  496. dma_ad += dwc_ep->data_per_frame;
  497. sts.b_iso_in.framenum += dwc_ep->bInterval;
  498. }
  499. sts.b_iso_in.ioc = 1;
  500. dma_desc->buf = dma_ad;
  501. dma_desc->status.d32 = sts.d32;
  502. ++dma_desc;
  503. /** Buffer 1 descriptors setup */
  504. sts.b_iso_in.ioc = 0;
  505. dma_ad = dwc_ep->dma_addr1;
  506. for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
  507. i += dwc_ep->pkt_per_frm) {
  508. dma_desc->buf = dma_ad;
  509. dma_desc->status.d32 = sts.d32;
  510. dma_desc++;
  511. dma_ad += dwc_ep->data_per_frame;
  512. sts.b_iso_in.framenum += dwc_ep->bInterval;
  513. sts.b_iso_in.ioc = 0;
  514. }
  515. sts.b_iso_in.ioc = 1;
  516. sts.b_iso_in.l = 1;
  517. dma_desc->buf = dma_ad;
  518. dma_desc->status.d32 = sts.d32;
  519. dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
  520. /** Write dma_ad into diepdma register */
  521. DWC_WRITE_REG32(&(in_regs->diepdma),
  522. (uint32_t) dwc_ep->iso_dma_desc_addr);
  523. }
  524. /** Enable endpoint, clear nak */
  525. depctl.d32 = 0;
  526. depctl.b.epena = 1;
  527. depctl.b.usbactep = 1;
  528. depctl.b.cnak = 1;
  529. DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
  530. depctl.d32 = DWC_READ_REG32(addr);
  531. }
/**
 * This function sets up and starts an Isochronous transfer in
 * buffer-DMA mode (no DMA descriptors).
 *
 * Programs transfer size/packet count and the DMA address for the half
 * of the double buffer currently being processed (proc_buf_num), then
 * enables the endpoint. No-op unless buffer DMA is enabled and
 * descriptor DMA is disabled.
 *
 * @param core_if Programming view of DWC_otg controller.
 * @param ep The EP to start the transfer on.
 */
void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t * core_if,
				       dwc_ep_t * ep)
{
	depctl_data_t depctl = {.d32 = 0 };
	volatile uint32_t *addr;

	if (ep->is_in) {
		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
	} else {
		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
	}

	/* Slave mode and descriptor-DMA mode are handled elsewhere. */
	if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
		return;
	} else {
		deptsiz_data_t deptsiz = {.d32 = 0 };

		/* Total buffer length: payload per frame across the
		 * whole buffer processing interval. */
		ep->xfer_len =
		    ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
		ep->pkt_cnt =
		    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
		ep->xfer_count = 0;
		/* Pick the double-buffer half being processed. */
		ep->xfer_buff =
		    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
		ep->dma_addr =
		    (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;

		if (ep->is_in) {
			/* Program the transfer size and packet count
			 * as follows: xfersize = N * maxpacket +
			 * short_packet pktcnt = N + (short_packet
			 * exist ? 1 : 0)
			 */
			deptsiz.b.mc = ep->pkt_per_frm;
			deptsiz.b.xfersize = ep->xfer_len;
			deptsiz.b.pktcnt =
			    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
			DWC_WRITE_REG32(&core_if->dev_if->
					in_ep_regs[ep->num]->dieptsiz,
					deptsiz.d32);
			/* Write the DMA register */
			DWC_WRITE_REG32(&
					(core_if->dev_if->
					 in_ep_regs[ep->num]->diepdma),
					(uint32_t) ep->dma_addr);
		} else {
			/* OUT: xfersize rounded up to whole packets. */
			deptsiz.b.pktcnt =
			    (ep->xfer_len + (ep->maxpacket - 1)) /
			    ep->maxpacket;
			deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;
			DWC_WRITE_REG32(&core_if->dev_if->
					out_ep_regs[ep->num]->doeptsiz,
					deptsiz.d32);
			/* Write the DMA register */
			DWC_WRITE_REG32(&
					(core_if->dev_if->
					 out_ep_regs[ep->num]->doepdma),
					(uint32_t) ep->dma_addr);
		}
		/** Enable endpoint, clear nak */
		depctl.d32 = 0;
		depctl.b.epena = 1;
		depctl.b.cnak = 1;
		DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
	}
}
  601. /**
  602. * This function does the setup for a data transfer for an EP and
  603. * starts the transfer. For an IN transfer, the packets will be
  604. * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
  605. * the packets are unloaded from the Rx FIFO in the ISR.
  606. *
  607. * @param core_if Programming view of DWC_otg controller.
  608. * @param ep The EP to start the transfer on.
  609. */
  610. static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t * core_if,
  611. dwc_ep_t * ep)
  612. {
  613. if (core_if->dma_enable) {
  614. if (core_if->dma_desc_enable) {
  615. if (ep->is_in) {
  616. ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
  617. } else {
  618. ep->desc_cnt = ep->pkt_cnt;
  619. }
  620. dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
  621. } else {
  622. if (core_if->pti_enh_enable) {
  623. dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
  624. } else {
  625. ep->cur_pkt_addr =
  626. (ep->proc_buf_num) ? ep->
  627. xfer_buff1 : ep->xfer_buff0;
  628. ep->cur_pkt_dma_addr =
  629. (ep->proc_buf_num) ? ep->
  630. dma_addr1 : ep->dma_addr0;
  631. dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
  632. }
  633. }
  634. } else {
  635. ep->cur_pkt_addr =
  636. (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
  637. ep->cur_pkt_dma_addr =
  638. (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
  639. dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
  640. }
  641. }
  642. /**
  643. * This function stops transfer for an EP and
  644. * resets the ep's variables.
  645. *
  646. * @param core_if Programming view of DWC_otg controller.
  647. * @param ep The EP to start the transfer on.
  648. */
  649. void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
  650. {
  651. depctl_data_t depctl = {.d32 = 0 };
  652. volatile uint32_t *addr;
  653. if (ep->is_in == 1) {
  654. addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
  655. } else {
  656. addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
  657. }
  658. /* disable the ep */
  659. depctl.d32 = DWC_READ_REG32(addr);
  660. depctl.b.epdis = 1;
  661. depctl.b.snak = 1;
  662. DWC_WRITE_REG32(addr, depctl.d32);
  663. if (core_if->dma_desc_enable &&
  664. ep->iso_desc_addr && ep->iso_dma_desc_addr) {
  665. dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
  666. ep->iso_dma_desc_addr,
  667. ep->desc_cnt * 2);
  668. }
  669. /* reset varibales */
  670. ep->dma_addr0 = 0;
  671. ep->dma_addr1 = 0;
  672. ep->xfer_buff0 = 0;
  673. ep->xfer_buff1 = 0;
  674. ep->data_per_frame = 0;
  675. ep->data_pattern_frame = 0;
  676. ep->sync_frame = 0;
  677. ep->buf_proc_intrvl = 0;
  678. ep->bInterval = 0;
  679. ep->proc_buf_num = 0;
  680. ep->pkt_per_frm = 0;
  681. ep->pkt_per_frm = 0;
  682. ep->desc_cnt = 0;
  683. ep->iso_desc_addr = 0;
  684. ep->iso_dma_desc_addr = 0;
  685. }
/**
 * Starts a periodic (double-buffered) isochronous transfer on an EP.
 * Records the two application buffers, derives packets-per-frame and the
 * total packet count, allocates the per-packet info array, then kicks off
 * the transfer with the lock released.
 *
 * Returns 0 on success, -DWC_E_INVALID for a bad EP (EP0 or no
 * descriptor), -DWC_E_NO_MEMORY if pkt_info allocation fails.
 */
int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t * pcd, void *ep_handle,
			     uint8_t * buf0, uint8_t * buf1, dwc_dma_t dma0,
			     dwc_dma_t dma1, int sync_frame, int dp_frame,
			     int data_per_frame, int start_frame,
			     int buf_proc_intrvl, void *req_handle,
			     int atomic_alloc)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags = 0;
	dwc_ep_t *dwc_ep;
	int32_t frm_data;
	dsts_data_t dsts;
	dwc_otg_core_if_t *core_if;

	/* EP0 cannot run isochronous traffic. */
	ep = get_ep_from_handle(pcd, ep_handle);
	if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
		DWC_WARN("bad ep\n");
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	core_if = GET_CORE_IF(pcd);
	dwc_ep = &ep->dwc_ep;

	/* NOTE(review): only warns and then overwrites the state of the
	 * in-progress request below — confirm this is intentional. */
	if (ep->iso_req_handle) {
		DWC_WARN("ISO request in progress\n");
	}

	/* Record both halves of the application double buffer. */
	dwc_ep->dma_addr0 = dma0;
	dwc_ep->dma_addr1 = dma1;
	dwc_ep->xfer_buff0 = buf0;
	dwc_ep->xfer_buff1 = buf1;
	dwc_ep->data_per_frame = data_per_frame;
	/** @todo - pattern data support is to be implemented in the future */
	dwc_ep->data_pattern_frame = dp_frame;
	dwc_ep->sync_frame = sync_frame;
	dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
	/* bInterval in the descriptor is an exponent (2^(n-1) frames). */
	dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
	dwc_ep->proc_buf_num = 0;
	dwc_ep->pkt_per_frm = 0;

	/* pkt_per_frm = ceil(data_per_frame / maxpacket). */
	frm_data = ep->dwc_ep.data_per_frame;
	while (frm_data > 0) {
		dwc_ep->pkt_per_frm++;
		frm_data -= ep->dwc_ep.maxpacket;
	}

	/* Pick the first (micro)frame: either caller-specified, or the
	 * next interval boundary after the current SOF number. */
	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
	if (start_frame == -1) {
		dwc_ep->next_frame = dsts.b.soffn + 1;
		if (dwc_ep->bInterval != 1) {
			dwc_ep->next_frame =
			    dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
						  dwc_ep->next_frame %
						  dwc_ep->bInterval);
		}
	} else {
		dwc_ep->next_frame = start_frame;
	}

	/* Total packets per processing interval; with the PTI enhancement
	 * the count is derived from total bytes instead. */
	if (!core_if->pti_enh_enable) {
		dwc_ep->pkt_cnt =
		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
		    dwc_ep->bInterval;
	} else {
		dwc_ep->pkt_cnt =
		    (dwc_ep->data_per_frame *
		     (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
		     - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
	}
	if (core_if->dma_desc_enable) {
		dwc_ep->desc_cnt =
		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
		    dwc_ep->bInterval;
	}

	/* Per-packet status array; atomic allocation when the caller is
	 * in a context that cannot sleep. */
	if (atomic_alloc) {
		dwc_ep->pkt_info =
		    DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	} else {
		dwc_ep->pkt_info =
		    DWC_ALLOC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	}
	if (!dwc_ep->pkt_info) {
		DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
		return -DWC_E_NO_MEMORY;
	}
	/* NOTE(review): pkt_info is only zeroed in the pti_enh path;
	 * confirm the other paths never read it before writing. */
	if (core_if->pti_enh_enable) {
		dwc_memset(dwc_ep->pkt_info, 0,
			   sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	}
	dwc_ep->cur_pkt = 0;
	ep->iso_req_handle = req_handle;

	/* Start the hardware transfer outside the spinlock. */
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
	dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
	return 0;
}
  775. int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t * pcd, void *ep_handle,
  776. void *req_handle)
  777. {
  778. dwc_irqflags_t flags = 0;
  779. dwc_otg_pcd_ep_t *ep;
  780. dwc_ep_t *dwc_ep;
  781. ep = get_ep_from_handle(pcd, ep_handle);
  782. if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
  783. DWC_WARN("bad ep\n");
  784. return -DWC_E_INVALID;
  785. }
  786. dwc_ep = &ep->dwc_ep;
  787. dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
  788. DWC_FREE(dwc_ep->pkt_info);
  789. DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
  790. if (ep->iso_req_handle != req_handle) {
  791. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  792. return -DWC_E_INVALID;
  793. }
  794. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  795. ep->iso_req_handle = 0;
  796. return 0;
  797. }
  798. /**
  799. * This function is used for perodical data exchnage between PCD and gadget drivers.
  800. * for Isochronous EPs
  801. *
  802. * - Every time a sync period completes this function is called to
  803. * perform data exchange between PCD and gadget
  804. */
void dwc_otg_iso_buffer_done(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep,
			     void *req_handle)
{
	int i;
	dwc_ep_t *dwc_ep;

	dwc_ep = &ep->dwc_ep;

	/* Caller holds ep->pcd->lock; drop it around the gadget callback,
	 * which reports the buffer that just completed (the one NOT
	 * currently being processed, hence proc_buf_num ^ 1).
	 * NOTE(review): the req_handle parameter is unused —
	 * ep->iso_req_handle is passed to the callback instead; confirm. */
	DWC_SPINUNLOCK(ep->pcd->lock);
	pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
				 dwc_ep->proc_buf_num ^ 0x1);
	DWC_SPINLOCK(ep->pcd->lock);

	/* Clear per-packet bookkeeping for the next sync period. */
	for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
		dwc_ep->pkt_info[i].status = 0;
		dwc_ep->pkt_info[i].offset = 0;
		dwc_ep->pkt_info[i].length = 0;
	}
}
  821. int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t * pcd, void *ep_handle,
  822. void *iso_req_handle)
  823. {
  824. dwc_otg_pcd_ep_t *ep;
  825. dwc_ep_t *dwc_ep;
  826. ep = get_ep_from_handle(pcd, ep_handle);
  827. if (!ep->desc || ep->dwc_ep.num == 0) {
  828. DWC_WARN("bad ep\n");
  829. return -DWC_E_INVALID;
  830. }
  831. dwc_ep = &ep->dwc_ep;
  832. return dwc_ep->pkt_cnt;
  833. }
  834. void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t * pcd, void *ep_handle,
  835. void *iso_req_handle, int packet,
  836. int *status, int *actual, int *offset)
  837. {
  838. dwc_otg_pcd_ep_t *ep;
  839. dwc_ep_t *dwc_ep;
  840. ep = get_ep_from_handle(pcd, ep_handle);
  841. if (!ep)
  842. DWC_WARN("bad ep\n");
  843. dwc_ep = &ep->dwc_ep;
  844. *status = dwc_ep->pkt_info[packet].status;
  845. *actual = dwc_ep->pkt_info[packet].length;
  846. *offset = dwc_ep->pkt_info[packet].offset;
  847. }
  848. #endif /* DWC_EN_ISOC */
  849. static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * pcd_ep,
  850. uint32_t is_in, uint32_t ep_num)
  851. {
  852. /* Init EP structure */
  853. pcd_ep->desc = 0;
  854. pcd_ep->pcd = pcd;
  855. pcd_ep->stopped = 1;
  856. pcd_ep->queue_sof = 0;
  857. /* Init DWC ep structure */
  858. pcd_ep->dwc_ep.is_in = is_in;
  859. pcd_ep->dwc_ep.num = ep_num;
  860. pcd_ep->dwc_ep.active = 0;
  861. pcd_ep->dwc_ep.tx_fifo_num = 0;
  862. /* Control until ep is actvated */
  863. pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
  864. pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
  865. pcd_ep->dwc_ep.dma_addr = 0;
  866. pcd_ep->dwc_ep.start_xfer_buff = 0;
  867. pcd_ep->dwc_ep.xfer_buff = 0;
  868. pcd_ep->dwc_ep.xfer_len = 0;
  869. pcd_ep->dwc_ep.xfer_count = 0;
  870. pcd_ep->dwc_ep.sent_zlp = 0;
  871. pcd_ep->dwc_ep.total_len = 0;
  872. pcd_ep->dwc_ep.desc_addr = 0;
  873. pcd_ep->dwc_ep.dma_desc_addr = 0;
  874. DWC_CIRCLEQ_INIT(&pcd_ep->queue);
  875. }
  876. /**
  877. * Initialize ep's
  878. */
  879. static void dwc_otg_pcd_reinit(dwc_otg_pcd_t * pcd)
  880. {
  881. int i;
  882. uint32_t hwcfg1;
  883. dwc_otg_pcd_ep_t *ep;
  884. int in_ep_cntr, out_ep_cntr;
  885. uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
  886. uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
  887. /**
  888. * Initialize the EP0 structure.
  889. */
  890. ep = &pcd->ep0;
  891. dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
  892. in_ep_cntr = 0;
  893. hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
  894. for (i = 1; in_ep_cntr < num_in_eps; i++) {
  895. if ((hwcfg1 & 0x1) == 0) {
  896. dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
  897. in_ep_cntr++;
  898. /**
  899. * @todo NGS: Add direction to EP, based on contents
  900. * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
  901. * sprintf(";r
  902. */
  903. dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
  904. DWC_CIRCLEQ_INIT(&ep->queue);
  905. }
  906. hwcfg1 >>= 2;
  907. }
  908. out_ep_cntr = 0;
  909. hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
  910. for (i = 1; out_ep_cntr < num_out_eps; i++) {
  911. if ((hwcfg1 & 0x1) == 0) {
  912. dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
  913. out_ep_cntr++;
  914. /**
  915. * @todo NGS: Add direction to EP, based on contents
  916. * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
  917. * sprintf(";r
  918. */
  919. dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
  920. DWC_CIRCLEQ_INIT(&ep->queue);
  921. }
  922. hwcfg1 >>= 2;
  923. }
  924. pcd->ep0state = EP0_DISCONNECT;
  925. pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
  926. pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
  927. }
  928. /**
  929. * This function is called when the SRP timer expires. The SRP should
  930. * complete within 6 seconds.
  931. */
/**
 * SRP timer expiry handler (SRP should complete within 6 seconds).
 * Depending on configuration it either powers down/re-probes the core
 * (ADP), resumes the peripheral, or clears the pending session request.
 */
static void srp_timeout(void *ptr)
{
	gotgctl_data_t gotgctl;
	dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
	volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;

	gotgctl.d32 = DWC_READ_REG32(addr);
	core_if->srp_timer_started = 0;

	if (core_if->adp_enable) {
		if (gotgctl.b.bsesvld == 0) {
			gpwrdn_data_t gpwrdn = {.d32 = 0 };
			DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
			/* Power off the core */
			if (core_if->power_down == 2) {
				gpwrdn.b.pwrdnswtch = 1;
				DWC_MODIFY_REG32(&core_if->core_global_regs->
						 gpwrdn, gpwrdn.d32, 0);
			}
			/* Enable the power-management unit and restart ADP
			 * probing to detect a new attach. */
			gpwrdn.d32 = 0;
			gpwrdn.b.pmuintsel = 1;
			gpwrdn.b.pmuactv = 1;
			DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
			dwc_otg_adp_probe_start(core_if);
		} else {
			/* B-session became valid: bring the core up as a
			 * peripheral. */
			DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
			core_if->op_state = B_PERIPHERAL;
			dwc_otg_core_init(core_if);
			dwc_otg_enable_global_interrupts(core_if);
			cil_pcd_start(core_if);
		}
	}

	if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
	    (core_if->core_params->i2c_enable)) {
		DWC_PRINTF("SRP Timeout\n");
		if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
				core_if->pcd_cb->resume_wakeup(core_if->pcd_cb_p);
			}
			/* Clear Session Request */
			gotgctl.d32 = 0;
			gotgctl.b.sesreq = 1;
			DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
					 gotgctl.d32, 0);
			core_if->srp_success = 0;
		} else {
			__DWC_ERROR("Device not connected/responding\n");
			gotgctl.b.sesreq = 0;
			DWC_WRITE_REG32(addr, gotgctl.d32);
		}
	} else if (gotgctl.b.sesreq) {
		/* SRP still pending with no response: give up and clear
		 * the session request bit. */
		DWC_PRINTF("SRP Timeout\n");
		__DWC_ERROR("Device not connected/responding\n");
		gotgctl.b.sesreq = 0;
		DWC_WRITE_REG32(addr, gotgctl.d32);
	} else {
		DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
	}
}
  989. /**
  990. * Tasklet
  991. *
  992. */
  993. extern void start_next_request(dwc_otg_pcd_ep_t * ep);
  994. static void start_xfer_tasklet_func(void *data)
  995. {
  996. dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
  997. dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
  998. int i;
  999. depctl_data_t diepctl;
  1000. DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
  1001. diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
  1002. if (pcd->ep0.queue_sof) {
  1003. pcd->ep0.queue_sof = 0;
  1004. start_next_request(&pcd->ep0);
  1005. // break;
  1006. }
  1007. for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
  1008. depctl_data_t diepctl;
  1009. diepctl.d32 =
  1010. DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
  1011. if (pcd->in_ep[i].queue_sof) {
  1012. pcd->in_ep[i].queue_sof = 0;
  1013. start_next_request(&pcd->in_ep[i]);
  1014. // break;
  1015. }
  1016. }
  1017. return;
  1018. }
  1019. /**
  1020. * This function initialized the PCD portion of the driver.
  1021. *
  1022. */
dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t * core_if)
{
	dwc_otg_pcd_t *pcd = NULL;
	dwc_otg_dev_if_t *dev_if;
	int i;

	/*
	 * Allocate PCD structure
	 */
	pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
	if (pcd == NULL) {
		return NULL;
	}
	pcd->lock = DWC_SPINLOCK_ALLOC();
	if (!pcd->lock) {
		DWC_ERROR("Could not allocate lock for pcd");
		DWC_FREE(pcd);
		return NULL;
	}
	/* Set core_if's lock pointer to hcd->lock */
	core_if->lock = pcd->lock;
	pcd->core_if = core_if;
	dev_if = core_if->dev_if;
	dev_if->isoc_ep = NULL;
	if (core_if->hwcfg4.b.ded_fifo_en) {
		DWC_PRINTF("Dedicated Tx FIFOs mode\n");
	} else {
		DWC_PRINTF("Shared Tx FIFO mode\n");
	}
	/*
	 * Initialize the Core for Device mode here if there is no ADP
	 * support. Otherwise it will be done later in the
	 * dwc_otg_adp_start routine.
	 */
	if (dwc_otg_is_device_mode(core_if) /*&& !core_if->adp_enable*/) {
		dwc_otg_core_dev_init(core_if);
	}
	/*
	 * Register the PCD Callbacks.
	 */
	dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
	/*
	 * Initialize the DMA buffer for SETUP packets
	 */
	if (GET_CORE_IF(pcd)->dma_enable) {
		pcd->setup_pkt =
		    DWC_DMA_ALLOC(sizeof(*pcd->setup_pkt) * 5,
				  &pcd->setup_pkt_dma_handle);
		if (pcd->setup_pkt == NULL) {
			DWC_FREE(pcd);
			return NULL;
		}
		pcd->status_buf =
		    DWC_DMA_ALLOC(sizeof(uint16_t),
				  &pcd->status_buf_dma_handle);
		if (pcd->status_buf == NULL) {
			DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
				     pcd->setup_pkt, pcd->setup_pkt_dma_handle);
			DWC_FREE(pcd);
			return NULL;
		}
		/* Descriptor-DMA mode also needs one-entry descriptor
		 * chains for the two SETUP buffers and the EP0 IN/OUT
		 * status stages. */
		if (GET_CORE_IF(pcd)->dma_desc_enable) {
			dev_if->setup_desc_addr[0] =
			    dwc_otg_ep_alloc_desc_chain(&dev_if->
							dma_setup_desc_addr[0],
							1);
			dev_if->setup_desc_addr[1] =
			    dwc_otg_ep_alloc_desc_chain(&dev_if->
							dma_setup_desc_addr[1],
							1);
			dev_if->in_desc_addr =
			    dwc_otg_ep_alloc_desc_chain(&dev_if->
							dma_in_desc_addr, 1);
			dev_if->out_desc_addr =
			    dwc_otg_ep_alloc_desc_chain(&dev_if->
							dma_out_desc_addr, 1);
			/* Roll back everything allocated so far if any of
			 * the four chains failed. */
			if (dev_if->setup_desc_addr[0] == 0
			    || dev_if->setup_desc_addr[1] == 0
			    || dev_if->in_desc_addr == 0
			    || dev_if->out_desc_addr == 0) {
				if (dev_if->out_desc_addr)
					dwc_otg_ep_free_desc_chain(dev_if->
								   out_desc_addr,
								   dev_if->
								   dma_out_desc_addr,
								   1);
				if (dev_if->in_desc_addr)
					dwc_otg_ep_free_desc_chain(dev_if->
								   in_desc_addr,
								   dev_if->
								   dma_in_desc_addr,
								   1);
				if (dev_if->setup_desc_addr[1])
					dwc_otg_ep_free_desc_chain(dev_if->
								   setup_desc_addr
								   [1],
								   dev_if->
								   dma_setup_desc_addr
								   [1], 1);
				if (dev_if->setup_desc_addr[0])
					dwc_otg_ep_free_desc_chain(dev_if->
								   setup_desc_addr
								   [0],
								   dev_if->
								   dma_setup_desc_addr
								   [0], 1);
				DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
					     pcd->setup_pkt,
					     pcd->setup_pkt_dma_handle);
				DWC_DMA_FREE(sizeof(*pcd->status_buf),
					     pcd->status_buf,
					     pcd->status_buf_dma_handle);
				DWC_FREE(pcd);
				return NULL;
			}
		}
	} else {
		pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
		if (pcd->setup_pkt == NULL) {
			DWC_FREE(pcd);
			return NULL;
		}
		pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
		if (pcd->status_buf == NULL) {
			DWC_FREE(pcd->setup_pkt);
			DWC_FREE(pcd);
			return NULL;
		}
	}
	dwc_otg_pcd_reinit(pcd);
	/* Allocate the cfi object for the PCD */
#ifdef DWC_UTE_CFI
	pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
	if (NULL == pcd->cfi)
		goto fail;
	if (init_cfi(pcd->cfi)) {
		CFI_INFO("%s: Failed to init the CFI object\n", __func__);
		goto fail;
	}
#endif
	/* Initialize tasklets */
	/* NOTE(review): tasklet/timer allocation results are not checked
	 * for NULL here — confirm DWC_TASK_ALLOC/DWC_TIMER_ALLOC cannot
	 * fail, or add error handling. */
	pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
						 start_xfer_tasklet_func, pcd);
	pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
						do_test_mode, pcd);
	/* Initialize SRP timer */
	core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
	if (core_if->core_params->dev_out_nak) {
		/**
		 * Initialize xfer timeout timer. Implemented for
		 * 2.93a feature "Device DDMA OUT NAK Enhancement"
		 */
		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
			pcd->core_if->ep_xfer_timer[i] =
			    DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
					    &pcd->core_if->ep_xfer_info[i]);
		}
	}
	return pcd;
#ifdef DWC_UTE_CFI
fail:
#endif
	/* NOTE(review): this failure path (reachable only via the CFI
	 * gotos above) frees setup_pkt/status_buf with DWC_FREE even when
	 * they were allocated with DWC_DMA_ALLOC — allocator mismatch;
	 * confirm and fix to mirror dwc_otg_pcd_remove(). */
	if (pcd->setup_pkt)
		DWC_FREE(pcd->setup_pkt);
	if (pcd->status_buf)
		DWC_FREE(pcd->status_buf);
#ifdef DWC_UTE_CFI
	if (pcd->cfi)
		DWC_FREE(pcd->cfi);
#endif
	if (pcd)
		DWC_FREE(pcd);
	return NULL;
}
  1195. /**
  1196. * Remove PCD specific data
  1197. */
void dwc_otg_pcd_remove(dwc_otg_pcd_t * pcd)
{
	dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
	int i;

	/* Stop any pending OUT-NAK transfer-timeout timers first so they
	 * cannot fire while we tear the PCD down. */
	if (pcd->core_if->core_params->dev_out_nak) {
		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
			DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
			pcd->core_if->ep_xfer_info[i].state = 0;
		}
	}

	/* Release the SETUP/status buffers with the allocator that was
	 * used to create them (DMA vs. plain). */
	if (GET_CORE_IF(pcd)->dma_enable) {
		DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
			     pcd->setup_pkt_dma_handle);
		DWC_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
			     pcd->status_buf_dma_handle);
		if (GET_CORE_IF(pcd)->dma_desc_enable) {
			dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
						   dev_if->dma_setup_desc_addr
						   [0], 1);
			dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
						   dev_if->dma_setup_desc_addr
						   [1], 1);
			dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
						   dev_if->dma_in_desc_addr, 1);
			dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
						   dev_if->dma_out_desc_addr,
						   1);
		}
	} else {
		DWC_FREE(pcd->setup_pkt);
		DWC_FREE(pcd->status_buf);
	}
	DWC_SPINLOCK_FREE(pcd->lock);
	/* Set core_if's lock pointer to NULL */
	pcd->core_if->lock = NULL;
	DWC_TASK_FREE(pcd->start_xfer_tasklet);
	DWC_TASK_FREE(pcd->test_mode_tasklet);
	if (pcd->core_if->core_params->dev_out_nak) {
		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
			if (pcd->core_if->ep_xfer_timer[i]) {
				DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
			}
		}
	}
	/* Release the CFI object's dynamic memory */
#ifdef DWC_UTE_CFI
	if (pcd->cfi->ops.release) {
		pcd->cfi->ops.release(pcd->cfi);
	}
#endif
	DWC_FREE(pcd);
}
  1250. /**
  1251. * Returns whether registered pcd is dual speed or not
  1252. */
  1253. uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t * pcd)
  1254. {
  1255. dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
  1256. if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
  1257. ((core_if->hwcfg2.b.hs_phy_type == 2) &&
  1258. (core_if->hwcfg2.b.fs_phy_type == 1) &&
  1259. (core_if->core_params->ulpi_fs_ls))) {
  1260. return 0;
  1261. }
  1262. return 1;
  1263. }
  1264. /**
  1265. * Returns whether registered pcd is OTG capable or not
  1266. */
  1267. uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t * pcd)
  1268. {
  1269. dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
  1270. gusbcfg_data_t usbcfg = {.d32 = 0 };
  1271. usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
  1272. if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap) {
  1273. return 0;
  1274. }
  1275. return 1;
  1276. }
  1277. /**
  1278. * This function assigns periodic Tx FIFO to an periodic EP
  1279. * in shared Tx FIFO mode
  1280. */
  1281. static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
  1282. {
  1283. uint32_t TxMsk = 1;
  1284. int i;
  1285. for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
  1286. if ((TxMsk & core_if->tx_msk) == 0) {
  1287. core_if->tx_msk |= TxMsk;
  1288. return i + 1;
  1289. }
  1290. TxMsk <<= 1;
  1291. }
  1292. return 0;
  1293. }
  1294. /**
  1295. * This function assigns periodic Tx FIFO to an periodic EP
  1296. * in shared Tx FIFO mode
  1297. */
  1298. static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
  1299. {
  1300. uint32_t PerTxMsk = 1;
  1301. int i;
  1302. for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
  1303. if ((PerTxMsk & core_if->p_tx_msk) == 0) {
  1304. core_if->p_tx_msk |= PerTxMsk;
  1305. return i + 1;
  1306. }
  1307. PerTxMsk <<= 1;
  1308. }
  1309. return 0;
  1310. }
  1311. /**
  1312. * This function releases periodic Tx FIFO
  1313. * in shared Tx FIFO mode
  1314. */
  1315. static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
  1316. uint32_t fifo_num)
  1317. {
  1318. core_if->p_tx_msk =
  1319. (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
  1320. }
  1321. /**
  1322. * This function releases periodic Tx FIFO
  1323. * in shared Tx FIFO mode
  1324. */
  1325. static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
  1326. {
  1327. core_if->tx_msk =
  1328. (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
  1329. }
  1330. /**
  1331. * This function is being called from gadget
  1332. * to enable PCD endpoint.
  1333. */
int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t * pcd,
			  const uint8_t * ep_desc, void *usb_ep)
{
	int num, dir;
	dwc_otg_pcd_ep_t *ep = NULL;
	const usb_endpoint_descriptor_t *desc;
	dwc_irqflags_t flags;
	fifosize_data_t dptxfsiz = {.d32 = 0 };
	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
	gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
	int retval = 0;
	int i, epcount;

	/* A NULL descriptor selects EP0, which cannot be (re)enabled via
	 * this path; record the gadget handle and bail out. */
	desc = (const usb_endpoint_descriptor_t *)ep_desc;
	if (!desc) {
		pcd->ep0.priv = usb_ep;
		ep = &pcd->ep0;
		retval = -DWC_E_INVALID;
		goto out;
	}

	num = UE_GET_ADDR(desc->bEndpointAddress);
	dir = UE_GET_DIR(desc->bEndpointAddress);
	if (!desc->wMaxPacketSize) {
		DWC_WARN("bad maxpacketsize\n");
		retval = -DWC_E_INVALID;
		goto out;
	}

	/* Find the PCD endpoint matching the descriptor's number and
	 * direction. */
	if (dir == UE_DIR_IN) {
		epcount = pcd->core_if->dev_if->num_in_eps;
		for (i = 0; i < epcount; i++) {
			if (num == pcd->in_ep[i].dwc_ep.num) {
				ep = &pcd->in_ep[i];
				break;
			}
		}
	} else {
		epcount = pcd->core_if->dev_if->num_out_eps;
		for (i = 0; i < epcount; i++) {
			if (num == pcd->out_ep[i].dwc_ep.num) {
				ep = &pcd->out_ep[i];
				break;
			}
		}
	}
	if (!ep) {
		DWC_WARN("bad address\n");
		retval = -DWC_E_INVALID;
		goto out;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	ep->desc = desc;
	ep->priv = usb_ep;
	/*
	 * Activate the EP
	 */
	ep->stopped = 0;
	ep->dwc_ep.is_in = (dir == UE_DIR_IN);
	ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
	ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;

	if (ep->dwc_ep.is_in) {
		if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			ep->dwc_ep.tx_fifo_num = 0;
			if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
				/*
				 * if ISOC EP then assign a Periodic Tx FIFO.
				 */
				ep->dwc_ep.tx_fifo_num =
				    assign_perio_tx_fifo(GET_CORE_IF(pcd));
			}
		} else {
			/*
			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
			 */
			ep->dwc_ep.tx_fifo_num =
			    assign_tx_fifo(GET_CORE_IF(pcd));
		}
		/* Calculating EP info controller base address: bump
		 * GDFIFOCFG.epinfobase by the depth of the newly assigned
		 * Tx FIFO (DTXFSIZ upper half). */
		if (ep->dwc_ep.tx_fifo_num && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			gdfifocfg.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
					   gdfifocfg);
			gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
			dptxfsiz.d32 =
			    (DWC_READ_REG32
			     (&GET_CORE_IF(pcd)->
			      core_global_regs->dtxfsiz[ep->dwc_ep.
							tx_fifo_num - 1]) >> 16);
			gdfifocfg.b.epinfobase =
			    gdfifocfgbase.d32 + dptxfsiz.d32;
			DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->
					gdfifocfg, gdfifocfg.d32);
		}
	}
	/* Set initial data PID. */
	if (ep->dwc_ep.type == UE_BULK) {
		ep->dwc_ep.data_pid_start = 0;
	}

	/* Alloc DMA Descriptors */
	if (GET_CORE_IF(pcd)->dma_desc_enable) {
#ifndef DWC_UTE_PER_IO
		if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
#endif
			ep->dwc_ep.desc_addr =
			    dwc_otg_ep_alloc_desc_chain(&ep->
							dwc_ep.dma_desc_addr,
							MAX_DMA_DESC_CNT);
			if (!ep->dwc_ep.desc_addr) {
				DWC_WARN("%s, can't allocate DMA descriptor\n",
					 __func__);
				retval = -DWC_E_SHUTDOWN;
				DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
				goto out;
			}
#ifndef DWC_UTE_PER_IO
		}
#endif
	}

	DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
		    (ep->dwc_ep.is_in ? "IN" : "OUT"),
		    ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
#ifdef DWC_UTE_PER_IO
	ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
#endif
	if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
		/* bInterval is an exponent; frame_num sentinel marks "no
		 * frame scheduled yet". */
		ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
		ep->dwc_ep.frame_num = 0xFFFFFFFF;
	}

	dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);

#ifdef DWC_UTE_CFI
	if (pcd->cfi->ops.ep_enable) {
		pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
	}
#endif

	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

out:
	return retval;
}
  1470. /**
  1471. * This function is being called from gadget
  1472. * to disable PCD endpoint.
  1473. */
int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t * pcd, void *ep_handle)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags;
	dwc_otg_dev_dma_desc_t *desc_addr;
	dwc_dma_t dma_desc_addr;
	gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
	fifosize_data_t dptxfsiz = {.d32 = 0 };

	ep = get_ep_from_handle(pcd, ep_handle);
	if (!ep || !ep->desc) {
		DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);

	/* Complete all outstanding requests and deactivate the EP in the
	 * core before releasing its resources. */
	dwc_otg_request_nuke(ep);

	dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
	if (pcd->core_if->core_params->dev_out_nak) {
		DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
		pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
	}
	ep->desc = NULL;
	ep->stopped = 1;

	gdfifocfg.d32 =
	    DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
	gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;

	if (ep->dwc_ep.is_in) {
		if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			/* Flush the Tx FIFO */
			dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
					      ep->dwc_ep.tx_fifo_num);
		}
		/* Return the FIFO to the allocation bitmaps; only one of
		 * these held the bit, the other call is a no-op. */
		release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
		release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
		if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			/* Decreasing EPinfo Base Addr */
			dptxfsiz.d32 =
			    (DWC_READ_REG32
			     (&GET_CORE_IF(pcd)->
			      core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
			gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
			DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
					gdfifocfg.d32);
		}
	}

	/* Free DMA Descriptors */
	if (GET_CORE_IF(pcd)->dma_desc_enable) {
		if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
			desc_addr = ep->dwc_ep.desc_addr;
			dma_desc_addr = ep->dwc_ep.dma_desc_addr;

			/* Cannot call dma_free_coherent() with IRQs disabled */
			DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
			dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
						   MAX_DMA_DESC_CNT);

			goto out_unlocked;
		}
	}
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

out_unlocked:
	DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
		    ep->dwc_ep.is_in ? "IN" : "OUT");
	return 0;
}
  1537. /******************************************************************************/
  1538. #ifdef DWC_UTE_PER_IO
  1539. /**
  1540. * Free the request and its extended parts
  1541. *
  1542. */
  1543. void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req)
  1544. {
  1545. DWC_FREE(req->ext_req.per_io_frame_descs);
  1546. DWC_FREE(req);
  1547. }
/**
 * Start the next request in the endpoint's queue.
 *
 * If no transfer is currently active on the endpoint, builds the DDMA
 * descriptor chain for the extended iso request at the head of ep->queue
 * and arms the endpoint (EPEna + CNAK).  If the queue is empty the endpoint
 * is marked stopped.  Called with the PCD lock held by its callers in this
 * file.
 *
 * @param pcd Programming view of the controller.
 * @param ep  Endpoint whose queue is serviced.
 * @return 0 always.
 */
int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t * pcd,
					dwc_otg_pcd_ep_t * ep)
{
	int i;
	dwc_otg_pcd_request_t *req = NULL;
	dwc_ep_t *dwcep = NULL;
	struct dwc_iso_xreq_port *ereq = NULL;
	struct dwc_iso_pkt_desc_port *ddesc_iso;
	uint16_t nat;
	depctl_data_t diepctl;	/* NOTE: also reused below to program DOEPCTL */

	dwcep = &ep->dwc_ep;

	/* A transfer is already in flight; nothing to do until it completes. */
	if (dwcep->xiso_active_xfers > 0) {
#if 0 //Disable this to decrease s/w overhead that is crucial for Isoc transfers
		DWC_WARN("There are currently active transfers for EP%d \
(active=%d; queued=%d)", dwcep->num, dwcep->xiso_active_xfers,
			 dwcep->xiso_queued_xfers);
#endif
		return 0;
	}

	/* Bits 12:11 of wMaxPacketSize = additional transactions per
	 * microframe (high-bandwidth high-speed endpoints). */
	nat = UGETW(ep->desc->wMaxPacketSize);
	nat = (nat >> 11) & 0x03;

	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		req = DWC_CIRCLEQ_FIRST(&ep->queue);
		ereq = &req->ext_req;
		ep->stopped = 0;

		/* Get the frame number */
		dwcep->xiso_frame_num =
		    dwc_otg_get_frame_number(GET_CORE_IF(pcd));
		DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);

		ddesc_iso = ereq->per_io_frame_descs;

		if (dwcep->is_in) {
			/* Setup DMA Descriptor chain for IN Isoc request */
			for (i = 0; i < ereq->pio_pkt_count; i++) {
				//if ((i % (nat + 1)) == 0)
				/* Advance the target (micro)frame for every
				 * descriptor after the first; 0x3FFF masks to
				 * the 14-bit frame counter. */
				if (i > 0)
					dwcep->xiso_frame_num =
					    (dwcep->xiso_bInterval +
					     dwcep->xiso_frame_num) & 0x3FFF;
				dwcep->desc_addr[i].buf =
				    req->dma + ddesc_iso[i].offset;
				dwcep->desc_addr[i].status.b_iso_in.txbytes =
				    ddesc_iso[i].length;
				dwcep->desc_addr[i].status.b_iso_in.framenum =
				    dwcep->xiso_frame_num;
				dwcep->desc_addr[i].status.b_iso_in.bs =
				    BS_HOST_READY;
				dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
				/* Short packet flag when length is not a
				 * whole number of max packets. */
				dwcep->desc_addr[i].status.b_iso_in.sp =
				    (ddesc_iso[i].length %
				     dwcep->maxpacket) ? 1 : 0;
				dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
				dwcep->desc_addr[i].status.b_iso_in.pid = nat + 1;
				dwcep->desc_addr[i].status.b_iso_in.l = 0;

				/* Process the last descriptor: interrupt on
				 * completion and mark end of chain. */
				if (i == ereq->pio_pkt_count - 1) {
					dwcep->desc_addr[i].status.b_iso_in.ioc = 1;
					dwcep->desc_addr[i].status.b_iso_in.l = 1;
				}
			}

			/* Setup and start the transfer for this endpoint */
			dwcep->xiso_active_xfers++;
			DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
					in_ep_regs[dwcep->num]->diepdma,
					dwcep->dma_desc_addr);
			diepctl.d32 = 0;
			diepctl.b.epena = 1;
			diepctl.b.cnak = 1;
			DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
					 in_ep_regs[dwcep->num]->diepctl, 0,
					 diepctl.d32);
		} else {
			/* Setup DMA Descriptor chain for OUT Isoc request */
			for (i = 0; i < ereq->pio_pkt_count; i++) {
				//if ((i % (nat + 1)) == 0)
				/* NOTE(review): unlike the IN path, the frame
				 * number is advanced for i == 0 as well —
				 * confirm this asymmetry is intentional. */
				dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
							 dwcep->xiso_frame_num) & 0x3FFF;
				dwcep->desc_addr[i].buf =
				    req->dma + ddesc_iso[i].offset;
				dwcep->desc_addr[i].status.b_iso_out.rxbytes =
				    ddesc_iso[i].length;
				dwcep->desc_addr[i].status.b_iso_out.framenum =
				    dwcep->xiso_frame_num;
				dwcep->desc_addr[i].status.b_iso_out.bs =
				    BS_HOST_READY;
				dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
				dwcep->desc_addr[i].status.b_iso_out.sp =
				    (ddesc_iso[i].length %
				     dwcep->maxpacket) ? 1 : 0;
				dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
				dwcep->desc_addr[i].status.b_iso_out.pid = nat + 1;
				dwcep->desc_addr[i].status.b_iso_out.l = 0;

				/* Process the last descriptor */
				if (i == ereq->pio_pkt_count - 1) {
					dwcep->desc_addr[i].status.b_iso_out.ioc = 1;
					dwcep->desc_addr[i].status.b_iso_out.l = 1;
				}
			}

			/* Setup and start the transfer for this endpoint.
			 * diepctl is reused here to build the DOEPCTL value. */
			dwcep->xiso_active_xfers++;
			DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
					out_ep_regs[dwcep->num]->doepdma,
					dwcep->dma_desc_addr);
			diepctl.d32 = 0;
			diepctl.b.epena = 1;
			diepctl.b.cnak = 1;
			DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
					 out_ep_regs[dwcep->num]->doepctl, 0,
					 diepctl.d32);
		}
	} else {
		/* Queue empty: nothing to start. */
		ep->stopped = 1;
	}

	return 0;
}
/**
 * Complete the extended iso request at the head of the endpoint's queue.
 *
 * Unlinks the request, copies actual lengths and status from the DMA
 * descriptors back into the portable per-frame descriptors, invokes the
 * non-portable completion callback with the PCD lock dropped, frees the
 * request, and starts the next queued transfer.
 *
 * Called with the PCD lock held (it is released around the callback and
 * re-acquired).
 */
void complete_xiso_ep(dwc_otg_pcd_ep_t * ep)
{
	dwc_otg_pcd_request_t *req = NULL;
	struct dwc_iso_xreq_port *ereq = NULL;
	struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
	dwc_ep_t *dwcep = NULL;
	int i;

	//DWC_DEBUG();
	dwcep = &ep->dwc_ep;

	/* Get the first pending request from the queue */
	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		req = DWC_CIRCLEQ_FIRST(&ep->queue);
		if (!req) {
			DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
			return;
		}
		dwcep->xiso_active_xfers--;
		dwcep->xiso_queued_xfers--;
		/* Remove this request from the queue */
		DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
	} else {
		DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
		return;
	}

	ep->stopped = 1;
	ereq = &req->ext_req;
	ddesc_iso = ereq->per_io_frame_descs;

	/* Should never go negative; indicates an accounting bug upstream. */
	if (dwcep->xiso_active_xfers < 0) {
		DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
			 dwcep->xiso_active_xfers);
	}

	/* Fill the Isoc descs of portable extended req from dma descriptors */
	for (i = 0; i < ereq->pio_pkt_count; i++) {
		if (dwcep->is_in) {	/* IN endpoints */
			/* txbytes counts down; the remainder is what was
			 * NOT transmitted. */
			ddesc_iso[i].actual_length = ddesc_iso[i].length -
			    dwcep->desc_addr[i].status.b_iso_in.txbytes;
			ddesc_iso[i].status =
			    dwcep->desc_addr[i].status.b_iso_in.txsts;
		} else {	/* OUT endpoints */
			ddesc_iso[i].actual_length = ddesc_iso[i].length -
			    dwcep->desc_addr[i].status.b_iso_out.rxbytes;
			ddesc_iso[i].status =
			    dwcep->desc_addr[i].status.b_iso_out.rxsts;
		}
	}

	/* Drop the lock across the callback: the non-portable completion may
	 * re-enter the PCD (e.g. queue another request). */
	DWC_SPINUNLOCK(ep->pcd->lock);

	/* Call the completion function in the non-portable logic */
	ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
				      &req->ext_req);

	DWC_SPINLOCK(ep->pcd->lock);

	/* Free the request - specific freeing needed for extended request object */
	dwc_pcd_xiso_ereq_free(ep, req);

	/* Start the next request */
	dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);

	return;
}
  1724. /**
  1725. * Create and initialize the Isoc pkt descriptors of the extended request.
  1726. *
  1727. */
  1728. static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t * req,
  1729. void *ereq_nonport,
  1730. int atomic_alloc)
  1731. {
  1732. struct dwc_iso_xreq_port *ereq = NULL;
  1733. struct dwc_iso_xreq_port *req_mapped = NULL;
  1734. struct dwc_iso_pkt_desc_port *ipds = NULL; /* To be created in this function */
  1735. uint32_t pkt_count;
  1736. int i;
  1737. ereq = &req->ext_req;
  1738. req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
  1739. pkt_count = req_mapped->pio_pkt_count;
  1740. /* Create the isoc descs */
  1741. if (atomic_alloc) {
  1742. ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
  1743. } else {
  1744. ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
  1745. }
  1746. if (!ipds) {
  1747. DWC_ERROR("Failed to allocate isoc descriptors");
  1748. return -DWC_E_NO_MEMORY;
  1749. }
  1750. /* Initialize the extended request fields */
  1751. ereq->per_io_frame_descs = ipds;
  1752. ereq->error_count = 0;
  1753. ereq->pio_alloc_pkt_count = pkt_count;
  1754. ereq->pio_pkt_count = pkt_count;
  1755. ereq->tr_sub_flags = req_mapped->tr_sub_flags;
  1756. /* Init the Isoc descriptors */
  1757. for (i = 0; i < pkt_count; i++) {
  1758. ipds[i].length = req_mapped->per_io_frame_descs[i].length;
  1759. ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
  1760. ipds[i].status = req_mapped->per_io_frame_descs[i].status; /* 0 */
  1761. ipds[i].actual_length =
  1762. req_mapped->per_io_frame_descs[i].actual_length;
  1763. }
  1764. return 0;
  1765. }
  1766. static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
  1767. {
  1768. struct dwc_iso_pkt_desc_port *xfd = NULL;
  1769. int i;
  1770. DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
  1771. DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
  1772. DWC_DEBUG("error_count=%d", ereq->error_count);
  1773. DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
  1774. DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
  1775. DWC_DEBUG("res=%d", ereq->res);
  1776. for (i = 0; i < ereq->pio_pkt_count; i++) {
  1777. xfd = &ereq->per_io_frame_descs[0];
  1778. DWC_DEBUG("FD #%d", i);
  1779. DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
  1780. DWC_DEBUG("xfd->length=%d", xfd->length);
  1781. DWC_DEBUG("xfd->offset=%d", xfd->offset);
  1782. DWC_DEBUG("xfd->status=%d", xfd->status);
  1783. }
  1784. }
  1785. /**
  1786. *
  1787. */
  1788. int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
  1789. uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
  1790. int zero, void *req_handle, int atomic_alloc,
  1791. void *ereq_nonport)
  1792. {
  1793. dwc_otg_pcd_request_t *req = NULL;
  1794. dwc_otg_pcd_ep_t *ep;
  1795. dwc_irqflags_t flags;
  1796. int res;
  1797. ep = get_ep_from_handle(pcd, ep_handle);
  1798. if (!ep) {
  1799. DWC_WARN("bad ep\n");
  1800. return -DWC_E_INVALID;
  1801. }
  1802. /* We support this extension only for DDMA mode */
  1803. if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
  1804. if (!GET_CORE_IF(pcd)->dma_desc_enable)
  1805. return -DWC_E_INVALID;
  1806. /* Create a dwc_otg_pcd_request_t object */
  1807. if (atomic_alloc) {
  1808. req = DWC_ALLOC_ATOMIC(sizeof(*req));
  1809. } else {
  1810. req = DWC_ALLOC(sizeof(*req));
  1811. }
  1812. if (!req) {
  1813. return -DWC_E_NO_MEMORY;
  1814. }
  1815. /* Create the Isoc descs for this request which shall be the exact match
  1816. * of the structure sent to us from the non-portable logic */
  1817. res =
  1818. dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
  1819. if (res) {
  1820. DWC_WARN("Failed to init the Isoc descriptors");
  1821. DWC_FREE(req);
  1822. return res;
  1823. }
  1824. DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
  1825. DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
  1826. req->buf = buf;
  1827. req->dma = dma_buf;
  1828. req->length = buflen;
  1829. req->sent_zlp = zero;
  1830. req->priv = req_handle;
  1831. //DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
  1832. ep->dwc_ep.dma_addr = dma_buf;
  1833. ep->dwc_ep.start_xfer_buff = buf;
  1834. ep->dwc_ep.xfer_buff = buf;
  1835. ep->dwc_ep.xfer_len = 0;
  1836. ep->dwc_ep.xfer_count = 0;
  1837. ep->dwc_ep.sent_zlp = 0;
  1838. ep->dwc_ep.total_len = buflen;
  1839. /* Add this request to the tail */
  1840. DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
  1841. ep->dwc_ep.xiso_queued_xfers++;
  1842. //DWC_DEBUG("CP_0");
  1843. //DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags);
  1844. //prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport);
  1845. //prn_ext_request(&req->ext_req);
  1846. //DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  1847. /* If the req->status == ASAP then check if there is any active transfer
  1848. * for this endpoint. If no active transfers, then get the first entry
  1849. * from the queue and start that transfer
  1850. */
  1851. if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
  1852. res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
  1853. if (res) {
  1854. DWC_WARN("Failed to start the next Isoc transfer");
  1855. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  1856. DWC_FREE(req);
  1857. return res;
  1858. }
  1859. }
  1860. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  1861. return 0;
  1862. }
  1863. #endif
  1864. /* END ifdef DWC_UTE_PER_IO ***************************************************/
  1865. int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
  1866. uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
  1867. int zero, void *req_handle, int atomic_alloc)
  1868. {
  1869. dwc_irqflags_t flags;
  1870. dwc_otg_pcd_request_t *req;
  1871. dwc_otg_pcd_ep_t *ep;
  1872. uint32_t max_transfer;
  1873. ep = get_ep_from_handle(pcd, ep_handle);
  1874. if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
  1875. DWC_WARN("bad ep\n");
  1876. return -DWC_E_INVALID;
  1877. }
  1878. if (atomic_alloc) {
  1879. req = DWC_ALLOC_ATOMIC(sizeof(*req));
  1880. } else {
  1881. req = DWC_ALLOC(sizeof(*req));
  1882. }
  1883. if (!req) {
  1884. return -DWC_E_NO_MEMORY;
  1885. }
  1886. DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
  1887. if (!GET_CORE_IF(pcd)->core_params->opt) {
  1888. if (ep->dwc_ep.num != 0) {
  1889. DWC_ERROR("queue req %p, len %d buf %p\n",
  1890. req_handle, buflen, buf);
  1891. }
  1892. }
  1893. req->buf = buf;
  1894. req->dma = dma_buf;
  1895. req->length = buflen;
  1896. req->sent_zlp = zero;
  1897. req->priv = req_handle;
  1898. req->dw_align_buf = NULL;
  1899. if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
  1900. && !GET_CORE_IF(pcd)->dma_desc_enable)
  1901. req->dw_align_buf = DWC_DMA_ALLOC(buflen,
  1902. &req->dw_align_buf_dma);
  1903. DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
  1904. /*
  1905. * After adding request to the queue for IN ISOC wait for In Token Received
  1906. * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token
  1907. * Received when EP is disabled interrupt to obtain starting microframe
  1908. * (odd/even) start transfer
  1909. */
  1910. if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
  1911. {
  1912. if (req != 0) {
  1913. depctl_data_t depctl = {.d32 = DWC_READ_REG32(&pcd->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->diepctl)};
  1914. ++pcd->request_pending;
  1915. DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
  1916. if (ep->dwc_ep.is_in)
  1917. {
  1918. depctl.b.cnak = 1;
  1919. DWC_WRITE_REG32(&pcd->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->diepctl, depctl.d32);
  1920. }
  1921. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  1922. }
  1923. return 0;
  1924. }
  1925. /*
  1926. * For EP0 IN without premature status, zlp is required?
  1927. */
  1928. if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
  1929. DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
  1930. //_req->zero = 1;
  1931. }
  1932. /* Start the transfer */
  1933. if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
  1934. /* EP0 Transfer? */
  1935. if (ep->dwc_ep.num == 0) {
  1936. switch (pcd->ep0state) {
  1937. case EP0_IN_DATA_PHASE:
  1938. DWC_DEBUGPL(DBG_PCD,
  1939. "%s ep0: EP0_IN_DATA_PHASE\n",
  1940. __func__);
  1941. break;
  1942. case EP0_OUT_DATA_PHASE:
  1943. DWC_DEBUGPL(DBG_PCD,
  1944. "%s ep0: EP0_OUT_DATA_PHASE\n",
  1945. __func__);
  1946. if (pcd->request_config) {
  1947. /* Complete STATUS PHASE */
  1948. ep->dwc_ep.is_in = 1;
  1949. pcd->ep0state = EP0_IN_STATUS_PHASE;
  1950. }
  1951. break;
  1952. case EP0_IN_STATUS_PHASE:
  1953. DWC_DEBUGPL(DBG_PCD,
  1954. "%s ep0: EP0_IN_STATUS_PHASE\n",
  1955. __func__);
  1956. break;
  1957. default:
  1958. DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
  1959. pcd->ep0state);
  1960. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  1961. return -DWC_E_SHUTDOWN;
  1962. }
  1963. ep->dwc_ep.dma_addr = dma_buf;
  1964. ep->dwc_ep.start_xfer_buff = buf;
  1965. ep->dwc_ep.xfer_buff = buf;
  1966. ep->dwc_ep.xfer_len = buflen;
  1967. ep->dwc_ep.xfer_count = 0;
  1968. ep->dwc_ep.sent_zlp = 0;
  1969. ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
  1970. if (zero) {
  1971. if ((ep->dwc_ep.xfer_len %
  1972. ep->dwc_ep.maxpacket == 0)
  1973. && (ep->dwc_ep.xfer_len != 0)) {
  1974. ep->dwc_ep.sent_zlp = 1;
  1975. }
  1976. }
  1977. dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
  1978. &ep->dwc_ep);
  1979. } // non-ep0 endpoints
  1980. else {
  1981. #ifdef DWC_UTE_CFI
  1982. if (ep->dwc_ep.buff_mode != BM_STANDARD) {
  1983. /* store the request length */
  1984. ep->dwc_ep.cfi_req_len = buflen;
  1985. pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
  1986. ep, req);
  1987. } else {
  1988. #endif
  1989. max_transfer =
  1990. GET_CORE_IF(ep->pcd)->
  1991. core_params->max_transfer_size;
  1992. /* Setup and start the Transfer */
  1993. if (req->dw_align_buf){
  1994. if (ep->dwc_ep.is_in)
  1995. dwc_memcpy(req->dw_align_buf, buf, buflen);
  1996. ep->dwc_ep.dma_addr = req->dw_align_buf_dma;
  1997. ep->dwc_ep.start_xfer_buff = req->dw_align_buf;
  1998. ep->dwc_ep.xfer_buff = req->dw_align_buf;
  1999. } else {
  2000. ep->dwc_ep.dma_addr = dma_buf;
  2001. ep->dwc_ep.start_xfer_buff = buf;
  2002. ep->dwc_ep.xfer_buff = buf;
  2003. }
  2004. ep->dwc_ep.xfer_len = 0;
  2005. ep->dwc_ep.xfer_count = 0;
  2006. ep->dwc_ep.sent_zlp = 0;
  2007. ep->dwc_ep.total_len = buflen;
  2008. ep->dwc_ep.maxxfer = max_transfer;
  2009. if (GET_CORE_IF(pcd)->dma_desc_enable) {
  2010. uint32_t out_max_xfer =
  2011. DDMA_MAX_TRANSFER_SIZE -
  2012. (DDMA_MAX_TRANSFER_SIZE % 4);
  2013. if (ep->dwc_ep.is_in) {
  2014. if (ep->dwc_ep.maxxfer >
  2015. DDMA_MAX_TRANSFER_SIZE) {
  2016. ep->dwc_ep.maxxfer =
  2017. DDMA_MAX_TRANSFER_SIZE;
  2018. }
  2019. } else {
  2020. if (ep->dwc_ep.maxxfer >
  2021. out_max_xfer) {
  2022. ep->dwc_ep.maxxfer =
  2023. out_max_xfer;
  2024. }
  2025. }
  2026. }
  2027. if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
  2028. ep->dwc_ep.maxxfer -=
  2029. (ep->dwc_ep.maxxfer %
  2030. ep->dwc_ep.maxpacket);
  2031. }
  2032. if (zero) {
  2033. if ((ep->dwc_ep.total_len %
  2034. ep->dwc_ep.maxpacket == 0)
  2035. && (ep->dwc_ep.total_len != 0)) {
  2036. ep->dwc_ep.sent_zlp = 1;
  2037. }
  2038. }
  2039. #ifdef DWC_UTE_CFI
  2040. }
  2041. #endif
  2042. dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
  2043. &ep->dwc_ep);
  2044. }
  2045. }
  2046. if (req != 0) {
  2047. ++pcd->request_pending;
  2048. DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
  2049. if (ep->dwc_ep.is_in && ep->stopped
  2050. && !(GET_CORE_IF(pcd)->dma_enable)) {
  2051. /** @todo NGS Create a function for this. */
  2052. diepmsk_data_t diepmsk = {.d32 = 0 };
  2053. diepmsk.b.intktxfemp = 1;
  2054. if (GET_CORE_IF(pcd)->multiproc_int_enable) {
  2055. DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
  2056. dev_global_regs->
  2057. diepeachintmsk[ep->dwc_ep.num],
  2058. 0, diepmsk.d32);
  2059. } else {
  2060. DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
  2061. dev_global_regs->diepmsk, 0,
  2062. diepmsk.d32);
  2063. }
  2064. }
  2065. }
  2066. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  2067. return 0;
  2068. }
  2069. int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t * pcd, void *ep_handle,
  2070. void *req_handle)
  2071. {
  2072. dwc_irqflags_t flags;
  2073. dwc_otg_pcd_request_t *req;
  2074. dwc_otg_pcd_ep_t *ep;
  2075. ep = get_ep_from_handle(pcd, ep_handle);
  2076. if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
  2077. DWC_WARN("bad argument\n");
  2078. return -DWC_E_INVALID;
  2079. }
  2080. DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
  2081. /* make sure it's actually queued on this endpoint */
  2082. DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
  2083. if (req->priv == (void *)req_handle) {
  2084. break;
  2085. }
  2086. }
  2087. if (req->priv != (void *)req_handle) {
  2088. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  2089. return -DWC_E_INVALID;
  2090. }
  2091. if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
  2092. dwc_otg_request_done(ep, req, -DWC_E_RESTART);
  2093. } else {
  2094. req = NULL;
  2095. }
  2096. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  2097. return req ? 0 : -DWC_E_SHUTDOWN;
  2098. }
/**
 * Set or clear the halt (STALL) condition on an endpoint.
 *
 * @param pcd       Programming view of the controller.
 * @param ep_handle Endpoint handle; isochronous endpoints are rejected.
 * @param value     0 = clear stall, 1 = set stall,
 *                  2 = clear stall_clear_flag, 3 = set stall_clear_flag.
 * @return 0 on success, -DWC_E_INVALID for a bad endpoint, -DWC_E_AGAIN if
 *         requests are still queued or the TX FIFO has not drained.
 */
int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t * pcd, void *ep_handle, int value)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags;
	int retval = 0;

	ep = get_ep_from_handle(pcd, ep_handle);
	/* Halting is invalid for unconfigured (non-EP0) and isoc endpoints. */
	if (!ep || (!ep->desc && ep != &pcd->ep0) ||
	    (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
		DWC_WARN("%s, bad ep\n", __func__);
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		/* Refuse to change stall state while transfers are queued. */
		DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
			 ep->dwc_ep.is_in ? "IN" : "OUT");
		retval = -DWC_E_AGAIN;
	} else if (value == 0) {
		dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
	} else if (value == 1) {
		/* For DDMA IN endpoints, only stall once the TX FIFO has
		 * fully drained (free space == configured depth). */
		if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
			dtxfsts_data_t txstatus;
			fifosize_data_t txfifosize;

			/* NOTE(review): dtxfsiz is indexed with tx_fifo_num
			 * here but with tx_fifo_num - 1 elsewhere in this
			 * file — confirm which indexing is intended. */
			txfifosize.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
					   dtxfsiz[ep->dwc_ep.tx_fifo_num]);
			txstatus.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
					   in_ep_regs[ep->dwc_ep.num]->dtxfsts);

			if (txstatus.b.txfspcavail < txfifosize.b.depth) {
				DWC_WARN("%s() Data In Tx Fifo\n", __func__);
				retval = -DWC_E_AGAIN;
			} else {
				if (ep->dwc_ep.num == 0) {
					pcd->ep0state = EP0_STALL;
				}

				ep->stopped = 1;
				dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
						     &ep->dwc_ep);
			}
		} else {
			if (ep->dwc_ep.num == 0) {
				pcd->ep0state = EP0_STALL;
			}

			ep->stopped = 1;
			dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
		}
	} else if (value == 2) {
		ep->dwc_ep.stall_clear_flag = 0;
	} else if (value == 3) {
		ep->dwc_ep.stall_clear_flag = 1;
	}
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

	return retval;
}
/**
 * This function initiates remote wakeup of the host from suspend state.
 *
 * If DEVICE_REMOTE_WAKEUP is enabled and @set is non-zero, pulses
 * DCTL.RmtWkUpSig (set, hold ~2 ms, clear).  When ADP is enabled the Power
 * Down Logic is first disabled and the core re-initialized for device mode.
 *
 * @param pcd Programming view of the controller.
 * @param set Non-zero to generate the remote wakeup pulse.
 */
void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t * pcd, int set)
{
	dctl_data_t dctl = { 0 };
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dsts_data_t dsts;

	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
	if (!dsts.b.suspsts) {
		/* Warn but continue: signaling outside suspend is unusual. */
		DWC_WARN("Remote wakeup while is not in suspend state\n");
	}
	/* Check if DEVICE_REMOTE_WAKEUP feature enabled */
	if (pcd->remote_wakeup_enable) {
		if (set) {
			if (core_if->adp_enable) {
				gpwrdn_data_t gpwrdn;

				dwc_otg_adp_probe_stop(core_if);

				/* Mask SRP detected interrupt from Power Down Logic */
				gpwrdn.d32 = 0;
				gpwrdn.b.srp_det_msk = 1;
				DWC_MODIFY_REG32(&core_if->core_global_regs->
						 gpwrdn, gpwrdn.d32, 0);

				/* Disable Power Down Logic */
				gpwrdn.d32 = 0;
				gpwrdn.b.pmuactv = 1;
				DWC_MODIFY_REG32(&core_if->core_global_regs->
						 gpwrdn, gpwrdn.d32, 0);

				/*
				 * Initialize the Core for Device mode.
				 */
				core_if->op_state = B_PERIPHERAL;
				dwc_otg_core_init(core_if);
				dwc_otg_enable_global_interrupts(core_if);
				cil_pcd_start(core_if);

				dwc_otg_initiate_srp(core_if);
			}

			/* Pulse DCTL.RmtWkUpSig: set, hold 2 ms, then clear. */
			dctl.b.rmtwkupsig = 1;
			DWC_MODIFY_REG32(&core_if->dev_if->
					 dev_global_regs->dctl, 0, dctl.d32);
			DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");

			dwc_mdelay(2);
			DWC_MODIFY_REG32(&core_if->dev_if->
					 dev_global_regs->dctl, dctl.d32, 0);
			DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
		}
	} else {
		DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
	}
}
  2203. #ifdef CONFIG_USB_DWC_OTG_LPM
/**
 * This function initiates remote wakeup of the host from L1 sleep state.
 *
 * Bails out unless the port is actually in L1 sleep, the host has enabled
 * remote wakeup, and the core reports resume-OK.  Then disables UTMI sleep,
 * lowers the HIRD threshold, and (if @set) raises DCTL.RmtWkUpSig — the
 * hardware clears that bit automatically.
 *
 * @param pcd Programming view of the controller.
 * @param set Non-zero to generate the remote wakeup signaling.
 */
void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t * pcd, int set)
{
	glpmcfg_data_t lpmcfg;
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);

	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);

	/* Check if we are in L1 state */
	if (!lpmcfg.b.prt_sleep_sts) {
		DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
		return;
	}

	/* Check if host allows remote wakeup */
	if (!lpmcfg.b.rem_wkup_en) {
		DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
		return;
	}

	/* Check if Resume OK */
	if (!lpmcfg.b.sleep_state_resumeok) {
		DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
		return;
	}

	/* NOTE(review): glpmcfg was already read above and no early return
	 * modifies it; this second read looks redundant — confirm whether
	 * the register can change asynchronously here. */
	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
	lpmcfg.b.en_utmi_sleep = 0;
	lpmcfg.b.hird_thres &= (~(1 << 4));
	DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);

	if (set) {
		dctl_data_t dctl = {.d32 = 0 };
		dctl.b.rmtwkupsig = 1;
		/* Set RmtWkUpSig bit to start remote wakup signaling.
		 * Hardware will automatically clear this bit.
		 */
		DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
				 0, dctl.d32);
		DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
	}
}
  2242. #endif
/**
 * Performs remote wakeup.
 *
 * Dispatches to the L1-sleep variant when the link is in L1 (LPM builds
 * only), otherwise to the suspend variant.  No-op in host mode.  Takes the
 * PCD lock around the whole operation.
 *
 * @param pcd Programming view of the controller.
 * @param set Non-zero to generate the wakeup signaling.
 */
void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * pcd, int set)
{
	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
	dwc_irqflags_t flags;
	if (dwc_otg_is_device_mode(core_if)) {
		DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
#ifdef CONFIG_USB_DWC_OTG_LPM
		if (core_if->lx_state == DWC_OTG_L1) {
			dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
		} else {
#endif
			dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
#ifdef CONFIG_USB_DWC_OTG_LPM
		}
#endif
		DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
	}
	return;
}
  2265. void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t * pcd, int no_of_usecs)
  2266. {
  2267. dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
  2268. //dctl_data_t dctl = { 0 };
  2269. if (dwc_otg_is_device_mode(core_if)) {
  2270. //dctl.b.sftdiscon = 1;
  2271. DWC_PRINTF("Soft disconnect for %d useconds\n",no_of_usecs);
  2272. //DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
  2273. dwc_otg_device_soft_disconnect(core_if);
  2274. dwc_udelay(no_of_usecs);
  2275. //DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,0);
  2276. dwc_otg_device_soft_connect(core_if);
  2277. } else{
  2278. DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
  2279. }
  2280. return;
  2281. }
  2282. int dwc_otg_pcd_wakeup(dwc_otg_pcd_t * pcd)
  2283. {
  2284. dsts_data_t dsts;
  2285. gotgctl_data_t gotgctl;
  2286. /*
  2287. * This function starts the Protocol if no session is in progress. If
  2288. * a session is already in progress, but the device is suspended,
  2289. * remote wakeup signaling is started.
  2290. */
  2291. /* Check if valid session */
  2292. gotgctl.d32 =
  2293. DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
  2294. if (gotgctl.b.bsesvld) {
  2295. /* Check if suspend state */
  2296. dsts.d32 =
  2297. DWC_READ_REG32(&
  2298. (GET_CORE_IF(pcd)->dev_if->
  2299. dev_global_regs->dsts));
  2300. if (dsts.b.suspsts) {
  2301. dwc_otg_pcd_remote_wakeup(pcd, 1);
  2302. }
  2303. } else {
  2304. dwc_otg_pcd_initiate_srp(pcd);
  2305. }
  2306. return 0;
  2307. }
  2308. /**
  2309. * Start the SRP timer to detect when the SRP does not complete within
  2310. * 6 seconds.
  2311. *
  2312. * @param pcd the pcd structure.
  2313. */
  2314. void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * pcd)
  2315. {
  2316. dwc_irqflags_t flags;
  2317. DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
  2318. dwc_otg_initiate_srp(GET_CORE_IF(pcd));
  2319. DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
  2320. }
/* Return the current USB frame number as reported by the core. */
int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t * pcd)
{
	return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
}

/* Non-zero when Link Power Management is enabled in the core parameters. */
int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t * pcd)
{
	return GET_CORE_IF(pcd)->core_params->lpm_enable;
}

/* Accessor for the pcd->b_hnp_enable flag. */
uint32_t get_b_hnp_enable(dwc_otg_pcd_t * pcd)
{
	return pcd->b_hnp_enable;
}

/* Accessor for the pcd->a_hnp_support flag. */
uint32_t get_a_hnp_support(dwc_otg_pcd_t * pcd)
{
	return pcd->a_hnp_support;
}

/* Accessor for the pcd->a_alt_hnp_support flag. */
uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t * pcd)
{
	return pcd->a_alt_hnp_support;
}

/* Non-zero when the host has enabled DEVICE_REMOTE_WAKEUP. */
int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t * pcd)
{
	return pcd->remote_wakeup_enable;
}
  2345. #endif /* DWC_HOST_ONLY */