/*
 * R8A66597 UDC (USB gadget)
 *
 * Copyright (C) 2006-2009 Renesas Solutions Corp.
 *
 * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "r8a66597-udc.h"

#define DRIVER_VERSION	"2011-09-26"

static const char udc_name[] = "r8a66597_udc";
static const char *r8a66597_ep_name[] = {
	"ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7",
	"ep8", "ep9",
};

static void init_controller(struct r8a66597 *r8a66597);
static void disable_controller(struct r8a66597 *r8a66597);
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req);
static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags);

static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status);

/*-------------------------------------------------------------------------*/
static inline u16 get_usb_speed(struct r8a66597 *r8a66597)
{
	return r8a66597_read(r8a66597, DVSTCTR0) & RHST;
}
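
/*
 * enable_pipe_irq()/disable_pipe_irq() set or clear a single pipe's bit in
 * the given enable register (BRDYENB/NRDYENB/BEMPENB) while the BRDY/NRDY/BEMP
 * master enables in INTENB0 are temporarily masked, so the update cannot race
 * with an interrupt taken against a half-written enable mask.
 */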
static void enable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bset(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void disable_pipe_irq(struct r8a66597 *r8a66597, u16 pipenum,
		unsigned long reg)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | NRDYE | BRDYE,
			INTENB0);
	r8a66597_bclr(r8a66597, (1 << pipenum), reg);
	r8a66597_write(r8a66597, tmp, INTENB0);
}

static void r8a66597_usb_connect(struct r8a66597 *r8a66597)
{
	r8a66597_bset(r8a66597, CTRE, INTENB0);
	r8a66597_bset(r8a66597, BEMPE | BRDYE, INTENB0);

	r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
}

static void r8a66597_usb_disconnect(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	r8a66597_bclr(r8a66597, CTRE, INTENB0);
	r8a66597_bclr(r8a66597, BEMPE | BRDYE, INTENB0);
	r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);

	r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
	spin_unlock(&r8a66597->lock);
	r8a66597->driver->disconnect(&r8a66597->gadget);
	spin_lock(&r8a66597->lock);

	disable_controller(r8a66597);
	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	INIT_LIST_HEAD(&r8a66597->ep[0].queue);
}

static inline u16 control_reg_get_pid(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 pid = 0;
	unsigned long offset;

	if (pipenum == 0) {
		pid = r8a66597_read(r8a66597, DCPCTR) & PID;
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		pid = r8a66597_read(r8a66597, offset) & PID;
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}

	return pid;
}

static inline void control_reg_set_pid(struct r8a66597 *r8a66597, u16 pipenum,
		u16 pid)
{
	unsigned long offset;

	if (pipenum == 0) {
		r8a66597_mdfy(r8a66597, pid, PID, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_mdfy(r8a66597, pid, PID, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}
}

static inline void pipe_start(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_BUF);
}

static inline void pipe_stop(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_NAK);
}

static inline void pipe_stall(struct r8a66597 *r8a66597, u16 pipenum)
{
	control_reg_set_pid(r8a66597, pipenum, PID_STALL);
}

static inline u16 control_reg_get(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 ret = 0;
	unsigned long offset;

	if (pipenum == 0) {
		ret = r8a66597_read(r8a66597, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		ret = r8a66597_read(r8a66597, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}

	return ret;
}

static inline void control_reg_sqclr(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0) {
		r8a66597_bset(r8a66597, SQCLR, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQCLR, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597), "unexpect pipe num (%d)\n",
			pipenum);
	}
}

static void control_reg_sqset(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	pipe_stop(r8a66597, pipenum);

	if (pipenum == 0) {
		r8a66597_bset(r8a66597, SQSET, DCPCTR);
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		r8a66597_bset(r8a66597, SQSET, offset);
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpect pipe num(%d)\n", pipenum);
	}
}

static u16 control_reg_sqmon(struct r8a66597 *r8a66597, u16 pipenum)
{
	unsigned long offset;

	if (pipenum == 0) {
		return r8a66597_read(r8a66597, DCPCTR) & SQMON;
	} else if (pipenum < R8A66597_MAX_NUM_PIPE) {
		offset = get_pipectr_addr(pipenum);
		return r8a66597_read(r8a66597, offset) & SQMON;
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"unexpect pipe num(%d)\n", pipenum);
	}

	return 0;
}

static u16 save_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum)
{
	return control_reg_sqmon(r8a66597, pipenum);
}

static void restore_usb_toggle(struct r8a66597 *r8a66597, u16 pipenum,
			       u16 toggle)
{
	if (toggle)
		control_reg_sqset(r8a66597, pipenum);
	else
		control_reg_sqclr(r8a66597, pipenum);
}

static inline int get_buffer_size(struct r8a66597 *r8a66597, u16 pipenum)
{
	u16 tmp;
	int size;

	if (pipenum == 0) {
		tmp = r8a66597_read(r8a66597, DCPCFG);
		if ((tmp & R8A66597_CNTMD) != 0)
			size = 256;
		else {
			tmp = r8a66597_read(r8a66597, DCPMAXP);
			size = tmp & MAXP;
		}
	} else {
		r8a66597_write(r8a66597, pipenum, PIPESEL);
		tmp = r8a66597_read(r8a66597, PIPECFG);
		if ((tmp & R8A66597_CNTMD) != 0) {
			tmp = r8a66597_read(r8a66597, PIPEBUF);
			size = ((tmp >> 10) + 1) * 64;
		} else {
			tmp = r8a66597_read(r8a66597, PIPEMAXP);
			size = tmp & MXPS;
		}
	}

	return size;
}

static inline unsigned short mbw_value(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip)
		return MBW_32;
	else
		return MBW_16;
}
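
/*
 * Switch the FIFO port selected by @fifosel to @pipenum (or to the DCP with
 * the requested ISEL direction when @pipenum is 0) and spin until the
 * controller reflects the new CURPIPE/ISEL value, with a bounded timeout.
 */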
static void r8a66597_change_curpipe(struct r8a66597 *r8a66597, u16 pipenum,
				    u16 isel, u16 fifosel)
{
	u16 tmp, mask, loop;
	int i = 0;

	if (!pipenum) {
		mask = ISEL | CURPIPE;
		loop = isel;
	} else {
		mask = CURPIPE;
		loop = pipenum;
	}
	r8a66597_mdfy(r8a66597, loop, mask, fifosel);

	do {
		tmp = r8a66597_read(r8a66597, fifosel);
		if (i++ > 1000000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"r8a66597: register%x, loop %x "
				"is timeout\n", fifosel, loop);
			break;
		}
		ndelay(1);
	} while ((tmp & mask) != loop);
}

static inline void pipe_change(struct r8a66597 *r8a66597, u16 pipenum)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];

	if (ep->use_dma)
		r8a66597_bclr(r8a66597, DREQE, ep->fifosel);

	r8a66597_mdfy(r8a66597, pipenum, CURPIPE, ep->fifosel);

	ndelay(450);

	if (r8a66597_is_sudmac(r8a66597) && ep->use_dma)
		r8a66597_bclr(r8a66597, mbw_value(r8a66597), ep->fifosel);
	else
		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);

	if (ep->use_dma)
		r8a66597_bset(r8a66597, DREQE, ep->fifosel);
}
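
/*
 * Program PIPECFG/PIPEBUF/PIPEMAXP/PIPEPERI for the pipe described by @info:
 * pick the FIFO buffer number and size per transfer type (bulk pipes get
 * double buffering and, for OUT, SHTNAK), and fail with -ENOMEM if the
 * requested buffer would not fit in the controller's pipe memory.
 */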
static int pipe_buffer_setting(struct r8a66597 *r8a66597,
		struct r8a66597_pipe_info *info)
{
	u16 bufnum = 0, buf_bsize = 0;
	u16 pipecfg = 0;

	if (info->pipe == 0)
		return -EINVAL;

	r8a66597_write(r8a66597, info->pipe, PIPESEL);

	if (info->dir_in)
		pipecfg |= R8A66597_DIR;
	pipecfg |= info->type;
	pipecfg |= info->epnum;
	switch (info->type) {
	case R8A66597_INT:
		bufnum = 4 + (info->pipe - R8A66597_BASE_PIPENUM_INT);
		buf_bsize = 0;
		break;
	case R8A66597_BULK:
		/* isochronous pipes may be used as bulk pipes */
		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
		else
			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;

		bufnum = R8A66597_BASE_BUFNUM + (bufnum * 16);
		buf_bsize = 7;
		pipecfg |= R8A66597_DBLB;
		if (!info->dir_in)
			pipecfg |= R8A66597_SHTNAK;
		break;
	case R8A66597_ISO:
		bufnum = R8A66597_BASE_BUFNUM +
			 (info->pipe - R8A66597_BASE_PIPENUM_ISOC) * 16;
		buf_bsize = 7;
		break;
	}

	if (buf_bsize && ((bufnum + 16) >= R8A66597_MAX_BUFNUM)) {
		pr_err("r8a66597 pipe memory is insufficient\n");
		return -ENOMEM;
	}

	r8a66597_write(r8a66597, pipecfg, PIPECFG);
	r8a66597_write(r8a66597, (buf_bsize << 10) | (bufnum), PIPEBUF);
	r8a66597_write(r8a66597, info->maxpacket, PIPEMAXP);
	if (info->interval)
		info->interval--;
	r8a66597_write(r8a66597, info->interval, PIPEPERI);

	return 0;
}

static void pipe_buffer_release(struct r8a66597 *r8a66597,
				struct r8a66597_pipe_info *info)
{
	if (info->pipe == 0)
		return;

	if (is_bulk_pipe(info->pipe)) {
		r8a66597->bulk--;
	} else if (is_interrupt_pipe(info->pipe)) {
		r8a66597->interrupt--;
	} else if (is_isoc_pipe(info->pipe)) {
		r8a66597->isochronous--;
		if (info->type == R8A66597_BULK)
			r8a66597->bulk--;
	} else {
		dev_err(r8a66597_to_dev(r8a66597),
			"ep_release: unexpect pipenum (%d)\n", info->pipe);
	}
}

static void pipe_initialize(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	r8a66597_mdfy(r8a66597, 0, CURPIPE, ep->fifosel);

	r8a66597_write(r8a66597, ACLRM, ep->pipectr);
	r8a66597_write(r8a66597, 0, ep->pipectr);
	r8a66597_write(r8a66597, SQCLR, ep->pipectr);
	if (ep->use_dma) {
		r8a66597_mdfy(r8a66597, ep->pipenum, CURPIPE, ep->fifosel);

		ndelay(450);

		r8a66597_bset(r8a66597, mbw_value(r8a66597), ep->fifosel);
	}
}

static void r8a66597_ep_setting(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				const struct usb_endpoint_descriptor *desc,
				u16 pipenum, int dma)
{
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;

	ep->pipectr = get_pipectr_addr(pipenum);
	if (is_bulk_pipe(pipenum) || is_isoc_pipe(pipenum)) {
		ep->pipetre = get_pipetre_addr(pipenum);
		ep->pipetrn = get_pipetrn_addr(pipenum);
	} else {
		ep->pipetre = 0;
		ep->pipetrn = 0;
	}
	ep->pipenum = pipenum;
	ep->ep.maxpacket = usb_endpoint_maxp(desc);
	r8a66597->pipenum2ep[pipenum] = ep;
	r8a66597->epaddr2ep[desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK]
		= ep;
	INIT_LIST_HEAD(&ep->queue);
}

static void r8a66597_ep_release(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (pipenum == 0)
		return;

	if (ep->use_dma)
		r8a66597->num_dma--;
	ep->pipenum = 0;
	ep->busy = 0;
	ep->use_dma = 0;
}
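
/*
 * Pick a hardware pipe for the endpoint described by @desc, falling back to
 * an isochronous pipe when all bulk pipes are in use, configure its buffer
 * and bind it to @ep.  Returns 0 on success or a negative errno when no
 * suitable pipe is available.
 */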
static int alloc_pipe_config(struct r8a66597_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;
	int dma = 0;
	unsigned char *counter;
	int ret;

	ep->desc = desc;

	if (ep->pipenum)	/* already allocated pipe */
		return 0;

	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
		if (r8a66597->bulk >= R8A66597_MAX_NUM_BULK) {
			if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
				dev_err(r8a66597_to_dev(r8a66597),
					"bulk pipe is insufficient\n");
				return -ENODEV;
			} else {
				info.pipe = R8A66597_BASE_PIPENUM_ISOC
						+ r8a66597->isochronous;
				counter = &r8a66597->isochronous;
			}
		} else {
			info.pipe = R8A66597_BASE_PIPENUM_BULK + r8a66597->bulk;
			counter = &r8a66597->bulk;
		}
		info.type = R8A66597_BULK;
		dma = 1;
		break;
	case USB_ENDPOINT_XFER_INT:
		if (r8a66597->interrupt >= R8A66597_MAX_NUM_INT) {
			dev_err(r8a66597_to_dev(r8a66597),
				"interrupt pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_INT + r8a66597->interrupt;
		info.type = R8A66597_INT;
		counter = &r8a66597->interrupt;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (r8a66597->isochronous >= R8A66597_MAX_NUM_ISOC) {
			dev_err(r8a66597_to_dev(r8a66597),
				"isochronous pipe is insufficient\n");
			return -ENODEV;
		}
		info.pipe = R8A66597_BASE_PIPENUM_ISOC + r8a66597->isochronous;
		info.type = R8A66597_ISO;
		counter = &r8a66597->isochronous;
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597), "unexpect xfer type\n");
		return -EINVAL;
	}
	ep->type = info.type;

	info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	info.maxpacket = usb_endpoint_maxp(desc);
	info.interval = desc->bInterval;
	if (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
		info.dir_in = 1;
	else
		info.dir_in = 0;

	ret = pipe_buffer_setting(r8a66597, &info);
	if (ret < 0) {
		dev_err(r8a66597_to_dev(r8a66597),
			"pipe_buffer_setting fail\n");
		return ret;
	}

	(*counter)++;
	if ((counter == &r8a66597->isochronous) && info.type == R8A66597_BULK)
		r8a66597->bulk++;

	r8a66597_ep_setting(r8a66597, ep, desc, info.pipe, dma);
	pipe_initialize(ep);

	return 0;
}

static int free_pipe_config(struct r8a66597_ep *ep)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	struct r8a66597_pipe_info info;

	info.pipe = ep->pipenum;
	info.type = ep->type;
	pipe_buffer_release(r8a66597, &info);
	r8a66597_ep_release(ep);

	return 0;
}

/*-------------------------------------------------------------------------*/
static void pipe_irq_enable(struct r8a66597 *r8a66597, u16 pipenum)
{
	enable_irq_ready(r8a66597, pipenum);
	enable_irq_nrdy(r8a66597, pipenum);
}

static void pipe_irq_disable(struct r8a66597 *r8a66597, u16 pipenum)
{
	disable_irq_ready(r8a66597, pipenum);
	disable_irq_nrdy(r8a66597, pipenum);
}

/* if complete is true, gadget driver complete function is not called */
static void control_end(struct r8a66597 *r8a66597, unsigned ccpl)
{
	r8a66597->ep[0].internal_ccpl = ccpl;
	pipe_start(r8a66597, 0);
	r8a66597_bset(r8a66597, CCPL, DCPCTR);
}

static void start_ep0_write(struct r8a66597_ep *ep,
		struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, ep->pipenum);
	r8a66597_mdfy(r8a66597, ISEL, (ISEL | CURPIPE), CFIFOSEL);
	r8a66597_write(r8a66597, BCLR, ep->fifoctr);
	if (req->req.length == 0) {
		r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
		pipe_start(r8a66597, 0);
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);
		irq_ep0_write(ep, req);
	}
}

static void disable_fifosel(struct r8a66597 *r8a66597, u16 pipenum,
			    u16 fifosel)
{
	u16 tmp;

	tmp = r8a66597_read(r8a66597, fifosel) & CURPIPE;
	if (tmp == pipenum)
		r8a66597_change_curpipe(r8a66597, 0, 0, fifosel);
}

static void change_bfre_mode(struct r8a66597 *r8a66597, u16 pipenum,
			     int enable)
{
	struct r8a66597_ep *ep = r8a66597->pipenum2ep[pipenum];
	u16 tmp, toggle;

	/* check current BFRE bit */
	r8a66597_write(r8a66597, pipenum, PIPESEL);
	tmp = r8a66597_read(r8a66597, PIPECFG) & R8A66597_BFRE;
	if ((enable && tmp) || (!enable && !tmp))
		return;

	/* change BFRE bit */
	pipe_stop(r8a66597, pipenum);
	disable_fifosel(r8a66597, pipenum, CFIFOSEL);
	disable_fifosel(r8a66597, pipenum, D0FIFOSEL);
	disable_fifosel(r8a66597, pipenum, D1FIFOSEL);

	toggle = save_usb_toggle(r8a66597, pipenum);

	r8a66597_write(r8a66597, pipenum, PIPESEL);
	if (enable)
		r8a66597_bset(r8a66597, R8A66597_BFRE, PIPECFG);
	else
		r8a66597_bclr(r8a66597, R8A66597_BFRE, PIPECFG);

	/* initialize for internal BFRE flag */
	r8a66597_bset(r8a66597, ACLRM, ep->pipectr);
	r8a66597_bclr(r8a66597, ACLRM, ep->pipectr);

	restore_usb_toggle(r8a66597, pipenum, toggle);
}
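
/*
 * Claim the single SUDMAC channel for @ep if the controller has one, the
 * pipe is a bulk pipe and the channel is free; switch the endpoint to the
 * D0FIFO port and DMA-map the request.  Callers fall back to PIO when this
 * returns a negative errno.
 */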
static int sudmac_alloc_channel(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597_dma *dma;

	if (!r8a66597_is_sudmac(r8a66597))
		return -ENODEV;

	/* Check transfer type */
	if (!is_bulk_pipe(ep->pipenum))
		return -EIO;

	if (r8a66597->dma.used)
		return -EBUSY;

	/* set SUDMAC parameters */
	dma = &r8a66597->dma;
	dma->used = 1;
	if (ep->desc->bEndpointAddress & USB_DIR_IN) {
		dma->dir = 1;
	} else {
		dma->dir = 0;
		change_bfre_mode(r8a66597, ep->pipenum, 1);
	}

	/* set r8a66597_ep parameters */
	ep->use_dma = 1;
	ep->dma = dma;
	ep->fifoaddr = D0FIFO;
	ep->fifosel = D0FIFOSEL;
	ep->fifoctr = D0FIFOCTR;

	/* dma mapping */
	return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir);
}

static void sudmac_free_channel(struct r8a66597 *r8a66597,
				struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	if (!r8a66597_is_sudmac(r8a66597))
		return;

	usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir);

	r8a66597_bclr(r8a66597, DREQE, ep->fifosel);
	r8a66597_change_curpipe(r8a66597, 0, 0, ep->fifosel);

	ep->dma->used = 0;
	ep->use_dma = 0;
	ep->fifoaddr = CFIFO;
	ep->fifosel = CFIFOSEL;
	ep->fifoctr = CFIFOCTR;
}

static void sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep,
			 struct r8a66597_request *req)
{
	BUG_ON(req->req.length == 0);

	r8a66597_sudmac_write(r8a66597, LBA_WAIT, CH0CFG);
	r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA);
	r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC);
	r8a66597_sudmac_write(r8a66597, CH0ENDE, DINTCTRL);

	r8a66597_sudmac_write(r8a66597, DEN, CH0DEN);
}

static void start_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 tmp;

	pipe_change(r8a66597, ep->pipenum);
	disable_irq_empty(r8a66597, ep->pipenum);
	pipe_start(r8a66597, ep->pipenum);

	if (req->req.length == 0) {
		transfer_complete(ep, req, 0);
	} else {
		r8a66597_write(r8a66597, ~(1 << ep->pipenum), BRDYSTS);
		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
			/* PIO mode */
			pipe_change(r8a66597, ep->pipenum);
			disable_irq_empty(r8a66597, ep->pipenum);
			pipe_start(r8a66597, ep->pipenum);
			tmp = r8a66597_read(r8a66597, ep->fifoctr);
			if (unlikely((tmp & FRDY) == 0))
				pipe_irq_enable(r8a66597, ep->pipenum);
			else
				irq_packet_write(ep, req);
		} else {
			/* DMA mode */
			pipe_change(r8a66597, ep->pipenum);
			disable_irq_nrdy(r8a66597, ep->pipenum);
			pipe_start(r8a66597, ep->pipenum);
			enable_irq_nrdy(r8a66597, ep->pipenum);
			sudmac_start(r8a66597, ep, req);
		}
	}
}
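
/*
 * Arm an OUT (or ep0 read) transfer: for ep0 simply enable the pipe and its
 * interrupts; for other pipes program the transaction counter when one is
 * available, then start the transfer in SUDMAC mode or fall back to PIO with
 * BFRE disabled.
 */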
static void start_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	struct r8a66597 *r8a66597 = ep->r8a66597;
	u16 pipenum = ep->pipenum;

	if (ep->pipenum == 0) {
		r8a66597_mdfy(r8a66597, 0, (ISEL | CURPIPE), CFIFOSEL);
		r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		pipe_start(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	} else {
		pipe_stop(r8a66597, pipenum);
		if (ep->pipetre) {
			enable_irq_nrdy(r8a66597, pipenum);
			r8a66597_write(r8a66597, TRCLR, ep->pipetre);
			r8a66597_write(r8a66597,
				DIV_ROUND_UP(req->req.length, ep->ep.maxpacket),
				ep->pipetrn);
			r8a66597_bset(r8a66597, TRENB, ep->pipetre);
		}

		if (sudmac_alloc_channel(r8a66597, ep, req) < 0) {
			/* PIO mode */
			change_bfre_mode(r8a66597, ep->pipenum, 0);
			pipe_start(r8a66597, pipenum);	/* trigger once */
			pipe_irq_enable(r8a66597, pipenum);
		} else {
			pipe_change(r8a66597, pipenum);
			sudmac_start(r8a66597, ep, req);
			pipe_start(r8a66597, pipenum);	/* trigger once */
		}
	}
}

static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	if (ep->desc->bEndpointAddress & USB_DIR_IN)
		start_packet_write(ep, req);
	else
		start_packet_read(ep, req);
}

static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	u16 ctsq;

	ctsq = r8a66597_read(ep->r8a66597, INTSTS0) & CTSQ;

	switch (ctsq) {
	case CS_RDDS:
		start_ep0_write(ep, req);
		break;
	case CS_WRDS:
		start_packet_read(ep, req);
		break;
	case CS_WRND:
		control_end(ep->r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(ep->r8a66597),
			"start_ep0: unexpect ctsq(%x)\n", ctsq);
		break;
	}
}
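
/*
 * Bring the controller out of reset: program bus wait/pin configuration,
 * select high-speed operation, enable the USB module and its clocks (waiting
 * for the external oscillator and PLL on off-chip variants), set the IRQ
 * sense and configure DMA0 burst access.
 */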
static void init_controller(struct r8a66597 *r8a66597)
{
	u16 vif = r8a66597->pdata->vif ? LDRV : 0;
	u16 irq_sense = r8a66597->irq_sense_low ? INTL : 0;
	u16 endian = r8a66597->pdata->endian ? BIGEND : 0;

	if (r8a66597->pdata->on_chip) {
		if (r8a66597->pdata->buswait)
			r8a66597_write(r8a66597, r8a66597->pdata->buswait,
					SYSCFG1);
		else
			r8a66597_write(r8a66597, 0x0f, SYSCFG1);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	} else {
		r8a66597_bset(r8a66597, vif | endian, PINCFG);
		r8a66597_bset(r8a66597, HSE, SYSCFG0);		/* High spd */
		r8a66597_mdfy(r8a66597, get_xtal_from_pdata(r8a66597->pdata),
				XTAL, SYSCFG0);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
		r8a66597_bset(r8a66597, USBE, SYSCFG0);

		r8a66597_bset(r8a66597, XCKE, SYSCFG0);

		msleep(3);

		r8a66597_bset(r8a66597, PLLC, SYSCFG0);

		msleep(1);

		r8a66597_bset(r8a66597, SCKE, SYSCFG0);

		r8a66597_bset(r8a66597, irq_sense, INTENB1);
		r8a66597_write(r8a66597, BURST | CPU_ADR_RD_WR,
				DMA0CFG);
	}
}

static void disable_controller(struct r8a66597 *r8a66597)
{
	if (r8a66597->pdata->on_chip) {
		r8a66597_bset(r8a66597, SCKE, SYSCFG0);
		r8a66597_bclr(r8a66597, UTST, TESTMODE);

		/* disable interrupts */
		r8a66597_write(r8a66597, 0, INTENB0);
		r8a66597_write(r8a66597, 0, INTENB1);
		r8a66597_write(r8a66597, 0, BRDYENB);
		r8a66597_write(r8a66597, 0, BEMPENB);
		r8a66597_write(r8a66597, 0, NRDYENB);

		/* clear status */
		r8a66597_write(r8a66597, 0, BRDYSTS);
		r8a66597_write(r8a66597, 0, NRDYSTS);
		r8a66597_write(r8a66597, 0, BEMPSTS);

		r8a66597_bclr(r8a66597, USBE, SYSCFG0);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);

	} else {
		r8a66597_bclr(r8a66597, UTST, TESTMODE);
		r8a66597_bclr(r8a66597, SCKE, SYSCFG0);
		udelay(1);
		r8a66597_bclr(r8a66597, PLLC, SYSCFG0);
		udelay(1);
		udelay(1);
		r8a66597_bclr(r8a66597, XCKE, SYSCFG0);
	}
}

static void r8a66597_start_xclock(struct r8a66597 *r8a66597)
{
	u16 tmp;

	if (!r8a66597->pdata->on_chip) {
		tmp = r8a66597_read(r8a66597, SYSCFG0);
		if (!(tmp & XCKE))
			r8a66597_bset(r8a66597, XCKE, SYSCFG0);
	}
}

static struct r8a66597_request *get_request_from_ep(struct r8a66597_ep *ep)
{
	return list_entry(ep->queue.next, struct r8a66597_request, queue);
}

/*-------------------------------------------------------------------------*/
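/*
 * Finish @req on @ep: remove it from the queue, record the completion status
 * (or -ESHUTDOWN after disconnect), release any SUDMAC channel, call the
 * gadget driver's completion callback with the lock dropped and, if more
 * requests are queued, start the next one.
 */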
static void transfer_complete(struct r8a66597_ep *ep,
		struct r8a66597_request *req, int status)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	int restart = 0;

	if (unlikely(ep->pipenum == 0)) {
		if (ep->internal_ccpl) {
			ep->internal_ccpl = 0;
			return;
		}
	}

	list_del_init(&req->queue);
	if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		req->req.status = -ESHUTDOWN;
	else
		req->req.status = status;

	if (!list_empty(&ep->queue))
		restart = 1;

	if (ep->use_dma)
		sudmac_free_channel(ep->r8a66597, ep, req);

	spin_unlock(&ep->r8a66597->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&ep->r8a66597->lock);

	if (restart) {
		req = get_request_from_ep(ep);
		if (ep->desc)
			start_packet(ep, req);
	}
}
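
/*
 * Fill the control IN FIFO with the next chunk of @req.  Waits for FRDY with
 * a bounded busy loop, sets BVAL for short or zero-length packets, and keeps
 * the BEMP interrupt enabled only while more data remains to be written.
 */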
static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req)
{
	int i;
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	r8a66597_bset(r8a66597, ISEL, ep->fifosel);

	i = 0;
	do {
		tmp = r8a66597_read(r8a66597, ep->fifoctr);
		if (i++ > 100000) {
			dev_err(r8a66597_to_dev(r8a66597),
				"pipe0 is busy. maybe cpu i/o bus "
				"conflict. please power off this controller.");
			return;
		}
		ndelay(1);
	} while ((tmp & FRDY) == 0);

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		if (size > 0)
			r8a66597_write_fifo(r8a66597, ep, buf, size);
		if ((size == 0) || ((size % ep->ep.maxpacket) != 0))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		disable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	}
	pipe_start(r8a66597, pipenum);
}

static void irq_packet_write(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	unsigned bufsize;
	size_t size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597),
			"write fifo not ready. pipnum=%d\n", pipenum);
		return;
	}

	/* prepare parameters */
	bufsize = get_buffer_size(r8a66597, pipenum);
	buf = req->req.buf + req->req.actual;
	size = min(bufsize, req->req.length - req->req.actual);

	/* write fifo */
	if (req->req.buf) {
		r8a66597_write_fifo(r8a66597, ep, buf, size);
		if ((size == 0)
				|| ((size % ep->ep.maxpacket) != 0)
				|| ((bufsize != ep->ep.maxpacket)
					&& (bufsize > size)))
			r8a66597_bset(r8a66597, BVAL, ep->fifoctr);
	}

	/* update parameters */
	req->req.actual += size;
	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		disable_irq_ready(r8a66597, pipenum);
		enable_irq_empty(r8a66597, pipenum);
	} else {
		disable_irq_empty(r8a66597, pipenum);
		pipe_irq_enable(r8a66597, pipenum);
	}
}
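
/*
 * Drain the OUT FIFO into @req: copy at most the received length, the buffer
 * size and the space left in the request, stop the pipe once the transfer is
 * finished (short packet, zero-length packet or request filled) and complete
 * the request for non-control pipes.
 */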
static void irq_packet_read(struct r8a66597_ep *ep,
				struct r8a66597_request *req)
{
	u16 tmp;
	int rcv_len, bufsize, req_len;
	int size;
	void *buf;
	u16 pipenum = ep->pipenum;
	struct r8a66597 *r8a66597 = ep->r8a66597;
	int finish = 0;

	pipe_change(r8a66597, pipenum);
	tmp = r8a66597_read(r8a66597, ep->fifoctr);
	if (unlikely((tmp & FRDY) == 0)) {
		req->req.status = -EPIPE;
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		dev_err(r8a66597_to_dev(r8a66597), "read fifo not ready");
		return;
	}

	/* prepare parameters */
	rcv_len = tmp & DTLN;
	bufsize = get_buffer_size(r8a66597, pipenum);

	buf = req->req.buf + req->req.actual;
	req_len = req->req.length - req->req.actual;
	if (rcv_len < bufsize)
		size = min(rcv_len, req_len);
	else
		size = min(bufsize, req_len);

	/* update parameters */
	req->req.actual += size;

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (size % ep->ep.maxpacket)
			|| (size == 0)) {
		pipe_stop(r8a66597, pipenum);
		pipe_irq_disable(r8a66597, pipenum);
		finish = 1;
	}

	/* read fifo */
	if (req->req.buf) {
		if (size == 0)
			r8a66597_write(r8a66597, BCLR, ep->fifoctr);
		else
			r8a66597_read_fifo(r8a66597, ep->fifoaddr, buf, size);
	}

	if ((ep->pipenum != 0) && finish)
		transfer_complete(ep, req, 0);
}

static void irq_pipe_ready(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BRDY0) && (enb & BRDY0)) {
		r8a66597_write(r8a66597, ~BRDY0, BRDYSTS);
		r8a66597_mdfy(r8a66597, 0, CURPIPE, CFIFOSEL);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_packet_read(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BRDYSTS);
				ep = r8a66597->pipenum2ep[pipenum];
				req = get_request_from_ep(ep);
				if (ep->desc->bEndpointAddress & USB_DIR_IN)
					irq_packet_write(ep, req);
				else
					irq_packet_read(ep, req);
			}
		}
	}
}

static void irq_pipe_empty(struct r8a66597 *r8a66597, u16 status, u16 enb)
{
	u16 tmp;
	u16 check;
	u16 pipenum;
	struct r8a66597_ep *ep;
	struct r8a66597_request *req;

	if ((status & BEMP0) && (enb & BEMP0)) {
		r8a66597_write(r8a66597, ~BEMP0, BEMPSTS);

		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		irq_ep0_write(ep, req);
	} else {
		for (pipenum = 1; pipenum < R8A66597_MAX_NUM_PIPE; pipenum++) {
			check = 1 << pipenum;
			if ((status & check) && (enb & check)) {
				r8a66597_write(r8a66597, ~check, BEMPSTS);
				tmp = control_reg_get(r8a66597, pipenum);
				if ((tmp & INBUFM) == 0) {
					disable_irq_empty(r8a66597, pipenum);
					pipe_irq_disable(r8a66597, pipenum);
					pipe_stop(r8a66597, pipenum);
					ep = r8a66597->pipenum2ep[pipenum];
					req = get_request_from_ep(ep);
					if (!list_empty(&ep->queue))
						transfer_complete(ep, req, 0);
				}
			}
		}
	}
}
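
/*
 * Handle a standard GET_STATUS request in the driver itself: report
 * self-powered for the device, zero for interfaces and the halt state of the
 * addressed endpoint, then queue the two status bytes on ep0.
 */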
static void get_status(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct r8a66597_ep *ep;
	u16 pid;
	u16 status = 0;
	u16 w_index = le16_to_cpu(ctrl->wIndex);

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		status = 1 << USB_DEVICE_SELF_POWERED;
		break;
	case USB_RECIP_INTERFACE:
		status = 0;
		break;
	case USB_RECIP_ENDPOINT:
		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pid = control_reg_get_pid(r8a66597, ep->pipenum);
		if (pid == PID_STALL)
			status = 1 << USB_ENDPOINT_HALT;
		else
			status = 0;
		break;
	default:
		pipe_stall(r8a66597, 0);
		return;		/* exit */
	}

	r8a66597->ep0_data = cpu_to_le16(status);
	r8a66597->ep0_req->buf = &r8a66597->ep0_data;
	r8a66597->ep0_req->length = 2;
	/* AV: what happens if we get called again before that gets through? */
	spin_unlock(&r8a66597->lock);
	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
	spin_lock(&r8a66597->lock);
}

static void clear_feature(struct r8a66597 *r8a66597,
				struct usb_ctrlrequest *ctrl)
{
	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		if (!ep->wedge) {
			pipe_stop(r8a66597, ep->pipenum);
			control_reg_sqclr(r8a66597, ep->pipenum);
			spin_unlock(&r8a66597->lock);
			usb_ep_clear_halt(&ep->ep);
			spin_lock(&r8a66597->lock);
		}

		control_end(r8a66597, 1);

		req = get_request_from_ep(ep);
		if (ep->busy) {
			ep->busy = 0;
			if (list_empty(&ep->queue))
				break;
			start_packet(ep, req);
		} else if (!list_empty(&ep->queue))
			pipe_start(r8a66597, ep->pipenum);
	}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 tmp;
	int timeout = 3000;

	switch (ctrl->bRequestType & USB_RECIP_MASK) {
	case USB_RECIP_DEVICE:
		switch (le16_to_cpu(ctrl->wValue)) {
		case USB_DEVICE_TEST_MODE:
			control_end(r8a66597, 1);
			/* Wait for the completion of status stage */
			do {
				tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
				udelay(1);
			} while (tmp != CS_IDST || timeout-- > 0);

			if (tmp == CS_IDST)
				r8a66597_bset(r8a66597,
					      le16_to_cpu(ctrl->wIndex >> 8),
					      TESTMODE);
			break;
		default:
			pipe_stall(r8a66597, 0);
			break;
		}
		break;
	case USB_RECIP_INTERFACE:
		control_end(r8a66597, 1);
		break;
	case USB_RECIP_ENDPOINT: {
		struct r8a66597_ep *ep;
		u16 w_index = le16_to_cpu(ctrl->wIndex);

		ep = r8a66597->epaddr2ep[w_index & USB_ENDPOINT_NUMBER_MASK];
		pipe_stall(r8a66597, ep->pipenum);

		control_end(r8a66597, 1);
	}
		break;
	default:
		pipe_stall(r8a66597, 0);
		break;
	}
}

/* if return value is true, call class driver's setup() */
static int setup_packet(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
{
	u16 *p = (u16 *)ctrl;
	unsigned long offset = USBREQ;
	int i, ret = 0;

	/* read fifo */
	r8a66597_write(r8a66597, ~VALID, INTSTS0);

	for (i = 0; i < 4; i++)
		p[i] = r8a66597_read(r8a66597, offset + i*2);

	/* check request */
	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (ctrl->bRequest) {
		case USB_REQ_GET_STATUS:
			get_status(r8a66597, ctrl);
			break;
		case USB_REQ_CLEAR_FEATURE:
			clear_feature(r8a66597, ctrl);
			break;
		case USB_REQ_SET_FEATURE:
			set_feature(r8a66597, ctrl);
			break;
		default:
			ret = 1;
			break;
		}
	} else
		ret = 1;
	return ret;
}

static void r8a66597_update_usb_speed(struct r8a66597 *r8a66597)
{
	u16 speed = get_usb_speed(r8a66597);

	switch (speed) {
	case HSMODE:
		r8a66597->gadget.speed = USB_SPEED_HIGH;
		break;
	case FSMODE:
		r8a66597->gadget.speed = USB_SPEED_FULL;
		break;
	default:
		r8a66597->gadget.speed = USB_SPEED_UNKNOWN;
		dev_err(r8a66597_to_dev(r8a66597), "USB speed unknown\n");
	}
}

static void irq_device_state(struct r8a66597 *r8a66597)
{
	u16 dvsq;

	dvsq = r8a66597_read(r8a66597, INTSTS0) & DVSQ;
	r8a66597_write(r8a66597, ~DVST, INTSTS0);

	if (dvsq == DS_DFLT) {
		/* bus reset */
		spin_unlock(&r8a66597->lock);
		r8a66597->driver->disconnect(&r8a66597->gadget);
		spin_lock(&r8a66597->lock);
		r8a66597_update_usb_speed(r8a66597);
	}
	if (r8a66597->old_dvsq == DS_CNFG && dvsq != DS_CNFG)
		r8a66597_update_usb_speed(r8a66597);
	if ((dvsq == DS_CNFG || dvsq == DS_ADDS)
			&& r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
		r8a66597_update_usb_speed(r8a66597);

	r8a66597->old_dvsq = dvsq;
}

static void irq_control_stage(struct r8a66597 *r8a66597)
__releases(r8a66597->lock)
__acquires(r8a66597->lock)
{
	struct usb_ctrlrequest ctrl;
	u16 ctsq;

	ctsq = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
	r8a66597_write(r8a66597, ~CTRT, INTSTS0);

	switch (ctsq) {
	case CS_IDST: {
		struct r8a66597_ep *ep;
		struct r8a66597_request *req;
		ep = &r8a66597->ep[0];
		req = get_request_from_ep(ep);
		transfer_complete(ep, req, 0);
	}
		break;

	case CS_RDDS:
	case CS_WRDS:
	case CS_WRND:
		if (setup_packet(r8a66597, &ctrl)) {
			spin_unlock(&r8a66597->lock);
			if (r8a66597->driver->setup(&r8a66597->gadget, &ctrl)
				< 0)
				pipe_stall(r8a66597, 0);
			spin_lock(&r8a66597->lock);
		}
		break;
	case CS_RDSS:
	case CS_WRSS:
		control_end(r8a66597, 0);
		break;
	default:
		dev_err(r8a66597_to_dev(r8a66597),
			"ctrl_stage: unexpect ctsq(%x)\n", ctsq);
		break;
	}
}
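
/*
 * Called when the SUDMAC channel signals completion: wait for FRDY, account
 * the bytes actually transferred from CH0CBC, clear the channel status and,
 * when the request is finished, either complete it (OUT) or enable the BEMP
 * interrupt to catch the last IN packet draining from the FIFO.
 */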
static void sudmac_finish(struct r8a66597 *r8a66597, struct r8a66597_ep *ep)
{
	u16 pipenum;
	struct r8a66597_request *req;
	u32 len;
	int i = 0;

	pipenum = ep->pipenum;
	pipe_change(r8a66597, pipenum);

	while (!(r8a66597_read(r8a66597, ep->fifoctr) & FRDY)) {
		udelay(1);
		if (unlikely(i++ >= 10000)) {	/* timeout = 10 msec */
			dev_err(r8a66597_to_dev(r8a66597),
				"%s: FRDY was not set (%d)\n",
				__func__, pipenum);
			return;
		}
	}

	r8a66597_bset(r8a66597, BCLR, ep->fifoctr);
	req = get_request_from_ep(ep);

	/* prepare parameters */
	len = r8a66597_sudmac_read(r8a66597, CH0CBC);
	req->req.actual += len;

	/* clear */
	r8a66597_sudmac_write(r8a66597, CH0STCLR, DSTSCLR);

	/* check transfer finish */
	if ((!req->req.zero && (req->req.actual == req->req.length))
			|| (len % ep->ep.maxpacket)) {
		if (ep->dma->dir) {
			disable_irq_ready(r8a66597, pipenum);
			enable_irq_empty(r8a66597, pipenum);
		} else {
			/* Clear the interrupt flag for next transfer */
			r8a66597_write(r8a66597, ~(1 << pipenum), BRDYSTS);
			transfer_complete(ep, req, 0);
		}
	}
}

static void r8a66597_sudmac_irq(struct r8a66597 *r8a66597)
{
	u32 irqsts;
	struct r8a66597_ep *ep;
	u16 pipenum;

	irqsts = r8a66597_sudmac_read(r8a66597, DINTSTS);
	if (irqsts & CH0ENDS) {
		r8a66597_sudmac_write(r8a66597, CH0ENDC, DINTSTSCLR);
		pipenum = (r8a66597_read(r8a66597, D0FIFOSEL) & CURPIPE);
		ep = r8a66597->pipenum2ep[pipenum];
		sudmac_finish(r8a66597, ep);
	}
}
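
/*
 * Top-level interrupt handler: dispatch SUDMAC completions first, then (with
 * the lock held and CFIFOSEL saved/restored around the handlers) VBUS
 * sampling, device-state changes, buffer-ready/buffer-empty pipe interrupts
 * and control-stage transitions.
 */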
static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
{
	struct r8a66597 *r8a66597 = _r8a66597;
	u16 intsts0;
	u16 intenb0;
	u16 brdysts, nrdysts, bempsts;
	u16 brdyenb, nrdyenb, bempenb;
	u16 savepipe;
	u16 mask0;

	if (r8a66597_is_sudmac(r8a66597))
		r8a66597_sudmac_irq(r8a66597);

	spin_lock(&r8a66597->lock);

	intsts0 = r8a66597_read(r8a66597, INTSTS0);
	intenb0 = r8a66597_read(r8a66597, INTENB0);

	savepipe = r8a66597_read(r8a66597, CFIFOSEL);

	mask0 = intsts0 & intenb0;
	if (mask0) {
		brdysts = r8a66597_read(r8a66597, BRDYSTS);
		nrdysts = r8a66597_read(r8a66597, NRDYSTS);
		bempsts = r8a66597_read(r8a66597, BEMPSTS);
		brdyenb = r8a66597_read(r8a66597, BRDYENB);
		nrdyenb = r8a66597_read(r8a66597, NRDYENB);
		bempenb = r8a66597_read(r8a66597, BEMPENB);

		if (mask0 & VBINT) {
			r8a66597_write(r8a66597, 0xffff & ~VBINT,
					INTSTS0);
			r8a66597_start_xclock(r8a66597);

			/* start vbus sampling */
			r8a66597->old_vbus = r8a66597_read(r8a66597, INTSTS0)
					& VBSTS;
			r8a66597->scount = R8A66597_MAX_SAMPLING;

			mod_timer(&r8a66597->timer,
					jiffies + msecs_to_jiffies(50));
		}
		if (intsts0 & DVSQ)
			irq_device_state(r8a66597);

		if ((intsts0 & BRDY) && (intenb0 & BRDYE)
				&& (brdysts & brdyenb))
			irq_pipe_ready(r8a66597, brdysts, brdyenb);
		if ((intsts0 & BEMP) && (intenb0 & BEMPE)
				&& (bempsts & bempenb))
			irq_pipe_empty(r8a66597, bempsts, bempenb);

		if (intsts0 & CTRT)
			irq_control_stage(r8a66597);
	}

	r8a66597_write(r8a66597, savepipe, CFIFOSEL);

	spin_unlock(&r8a66597->lock);
	return IRQ_HANDLED;
}
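
/*
 * VBUS sampling timer: re-read VBSTS every 50 ms and only report a connect
 * or disconnect once the level has stayed stable for R8A66597_MAX_SAMPLING
 * consecutive samples, to debounce the VBUS pin.
 */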
  1298. static void r8a66597_timer(unsigned long _r8a66597)
  1299. {
  1300. struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
  1301. unsigned long flags;
  1302. u16 tmp;
  1303. spin_lock_irqsave(&r8a66597->lock, flags);
  1304. tmp = r8a66597_read(r8a66597, SYSCFG0);
  1305. if (r8a66597->scount > 0) {
  1306. tmp = r8a66597_read(r8a66597, INTSTS0) & VBSTS;
  1307. if (tmp == r8a66597->old_vbus) {
  1308. r8a66597->scount--;
  1309. if (r8a66597->scount == 0) {
  1310. if (tmp == VBSTS)
  1311. r8a66597_usb_connect(r8a66597);
  1312. else
  1313. r8a66597_usb_disconnect(r8a66597);
  1314. } else {
  1315. mod_timer(&r8a66597->timer,
  1316. jiffies + msecs_to_jiffies(50));
  1317. }
  1318. } else {
  1319. r8a66597->scount = R8A66597_MAX_SAMPLING;
  1320. r8a66597->old_vbus = tmp;
  1321. mod_timer(&r8a66597->timer,
  1322. jiffies + msecs_to_jiffies(50));
  1323. }
  1324. }
  1325. spin_unlock_irqrestore(&r8a66597->lock, flags);
  1326. }
  1327. /*-------------------------------------------------------------------------*/
  1328. static int r8a66597_enable(struct usb_ep *_ep,
  1329. const struct usb_endpoint_descriptor *desc)
  1330. {
  1331. struct r8a66597_ep *ep;
  1332. ep = container_of(_ep, struct r8a66597_ep, ep);
  1333. return alloc_pipe_config(ep, desc);
  1334. }
  1335. static int r8a66597_disable(struct usb_ep *_ep)
  1336. {
  1337. struct r8a66597_ep *ep;
  1338. struct r8a66597_request *req;
  1339. unsigned long flags;
  1340. ep = container_of(_ep, struct r8a66597_ep, ep);
  1341. BUG_ON(!ep);
  1342. while (!list_empty(&ep->queue)) {
  1343. req = get_request_from_ep(ep);
  1344. spin_lock_irqsave(&ep->r8a66597->lock, flags);
  1345. transfer_complete(ep, req, -ECONNRESET);
  1346. spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
  1347. }
  1348. pipe_irq_disable(ep->r8a66597, ep->pipenum);
  1349. return free_pipe_config(ep);
  1350. }
  1351. static struct usb_request *r8a66597_alloc_request(struct usb_ep *_ep,
  1352. gfp_t gfp_flags)
  1353. {
  1354. struct r8a66597_request *req;
  1355. req = kzalloc(sizeof(struct r8a66597_request), gfp_flags);
  1356. if (!req)
  1357. return NULL;
  1358. INIT_LIST_HEAD(&req->queue);
  1359. return &req->req;
  1360. }
  1361. static void r8a66597_free_request(struct usb_ep *_ep, struct usb_request *_req)
  1362. {
  1363. struct r8a66597_request *req;
  1364. req = container_of(_req, struct r8a66597_request, req);
  1365. kfree(req);
  1366. }
  1367. static int r8a66597_queue(struct usb_ep *_ep, struct usb_request *_req,
  1368. gfp_t gfp_flags)
  1369. {
  1370. struct r8a66597_ep *ep;
  1371. struct r8a66597_request *req;
  1372. unsigned long flags;
  1373. int request = 0;
  1374. ep = container_of(_ep, struct r8a66597_ep, ep);
  1375. req = container_of(_req, struct r8a66597_request, req);
  1376. if (ep->r8a66597->gadget.speed == USB_SPEED_UNKNOWN)
  1377. return -ESHUTDOWN;
  1378. spin_lock_irqsave(&ep->r8a66597->lock, flags);
  1379. if (list_empty(&ep->queue))
  1380. request = 1;
  1381. list_add_tail(&req->queue, &ep->queue);
  1382. req->req.actual = 0;
  1383. req->req.status = -EINPROGRESS;
  1384. if (ep->desc == NULL) /* control */
  1385. start_ep0(ep, req);
  1386. else {
  1387. if (request && !ep->busy)
  1388. start_packet(ep, req);
  1389. }
  1390. spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
  1391. return 0;
  1392. }
  1393. static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
  1394. {
  1395. struct r8a66597_ep *ep;
  1396. struct r8a66597_request *req;
  1397. unsigned long flags;
  1398. ep = container_of(_ep, struct r8a66597_ep, ep);
  1399. req = container_of(_req, struct r8a66597_request, req);
  1400. spin_lock_irqsave(&ep->r8a66597->lock, flags);
  1401. if (!list_empty(&ep->queue))
  1402. transfer_complete(ep, req, -ECONNRESET);
  1403. spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
  1404. return 0;
  1405. }
  1406. static int r8a66597_set_halt(struct usb_ep *_ep, int value)
  1407. {
  1408. struct r8a66597_ep *ep;
  1409. struct r8a66597_request *req;
  1410. unsigned long flags;
  1411. int ret = 0;
  1412. ep = container_of(_ep, struct r8a66597_ep, ep);
  1413. req = get_request_from_ep(ep);
  1414. spin_lock_irqsave(&ep->r8a66597->lock, flags);
  1415. if (!list_empty(&ep->queue)) {
  1416. ret = -EAGAIN;
  1417. goto out;
  1418. }
  1419. if (value) {
  1420. ep->busy = 1;
  1421. pipe_stall(ep->r8a66597, ep->pipenum);
  1422. } else {
  1423. ep->busy = 0;
  1424. ep->wedge = 0;
  1425. pipe_stop(ep->r8a66597, ep->pipenum);
  1426. }
  1427. out:
  1428. spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
  1429. return ret;
  1430. }
static int r8a66597_set_wedge(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);

	if (!ep || !ep->desc)
		return -EINVAL;

	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	ep->wedge = 1;
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);

	return usb_ep_set_halt(_ep);
}
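
/*
 * FIFO flush: only acts on an idle endpoint (empty queue, not busy);
 * stops the pipe, clears the FIFO buffer (BCLR) and pulses ACLRM in the
 * pipe control register to reset the pipe buffers.
 */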
static void r8a66597_fifo_flush(struct usb_ep *_ep)
{
	struct r8a66597_ep *ep;
	unsigned long flags;

	ep = container_of(_ep, struct r8a66597_ep, ep);
	spin_lock_irqsave(&ep->r8a66597->lock, flags);
	if (list_empty(&ep->queue) && !ep->busy) {
		pipe_stop(ep->r8a66597, ep->pipenum);
		r8a66597_bclr(ep->r8a66597, BCLR, ep->fifoctr);
		r8a66597_write(ep->r8a66597, ACLRM, ep->pipectr);
		r8a66597_write(ep->r8a66597, 0, ep->pipectr);
	}
	spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
}

static struct usb_ep_ops r8a66597_ep_ops = {
	.enable		= r8a66597_enable,
	.disable	= r8a66597_disable,
	.alloc_request	= r8a66597_alloc_request,
	.free_request	= r8a66597_free_request,
	.queue		= r8a66597_queue,
	.dequeue	= r8a66597_dequeue,
	.set_halt	= r8a66597_set_halt,
	.set_wedge	= r8a66597_set_wedge,
	.fifo_flush	= r8a66597_fifo_flush,
};

/*-------------------------------------------------------------------------*/
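
/*
 * udc_start: bind the gadget driver and bring the controller up.  Only
 * high-speed capable drivers are accepted.  If VBUS is already present,
 * the 50 ms sampling timer is started to debounce the connection state.
 */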
static int r8a66597_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);

	if (!driver
			|| driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;
	if (!r8a66597)
		return -ENODEV;

	/* hook up the driver */
	r8a66597->driver = driver;

	init_controller(r8a66597);
	r8a66597_bset(r8a66597, VBSE, INTENB0);
	if (r8a66597_read(r8a66597, INTSTS0) & VBSTS) {
		r8a66597_start_xclock(r8a66597);
		/* start vbus sampling */
		r8a66597->old_vbus = r8a66597_read(r8a66597,
				INTSTS0) & VBSTS;
		r8a66597->scount = R8A66597_MAX_SAMPLING;
		mod_timer(&r8a66597->timer, jiffies + msecs_to_jiffies(50));
	}

	return 0;
}
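
/*
 * udc_stop: mask the VBUS interrupt and shut the controller down under
 * the lock, then drop the reference to the gadget driver.
 */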
static int r8a66597_stop(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;

	spin_lock_irqsave(&r8a66597->lock, flags);
	r8a66597_bclr(r8a66597, VBSE, INTENB0);
	disable_controller(r8a66597);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	r8a66597->driver = NULL;
	return 0;
}

/*-------------------------------------------------------------------------*/
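
/* Return the current frame number from the FRMNUM register. */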
static int r8a66597_get_frame(struct usb_gadget *_gadget)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(_gadget);
	return r8a66597_read(r8a66597, FRMNUM) & 0x03FF;
}
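
/*
 * Soft connect/disconnect: DPRPU in SYSCFG0 controls the D+ pull-up, so
 * setting it makes the device visible on the bus and clearing it
 * disconnects.
 */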
static int r8a66597_pullup(struct usb_gadget *gadget, int is_on)
{
	struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
	unsigned long flags;

	spin_lock_irqsave(&r8a66597->lock, flags);
	if (is_on)
		r8a66597_bset(r8a66597, DPRPU, SYSCFG0);
	else
		r8a66597_bclr(r8a66597, DPRPU, SYSCFG0);
	spin_unlock_irqrestore(&r8a66597->lock, flags);

	return 0;
}

static struct usb_gadget_ops r8a66597_gadget_ops = {
	.get_frame	= r8a66597_get_frame,
	.udc_start	= r8a66597_start,
	.udc_stop	= r8a66597_stop,
	.pullup		= r8a66597_pullup,
};
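
/*
 * Platform device removal: unregister the UDC, stop the VBUS timer,
 * unmap the register windows (including SUDMAC when used), free the IRQ
 * and the preallocated ep0 request, and release the clock on on-chip
 * controllers.
 */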
static int __exit r8a66597_remove(struct platform_device *pdev)
{
	struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);

	usb_del_gadget_udc(&r8a66597->gadget);
	del_timer_sync(&r8a66597->timer);
	iounmap(r8a66597->reg);
	if (r8a66597->pdata->sudmac)
		iounmap(r8a66597->sudmac_reg);
	free_irq(platform_get_irq(pdev, 0), r8a66597);
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
#endif
	device_unregister(&r8a66597->gadget.dev);
	kfree(r8a66597);
	return 0;
}

static void nop_completion(struct usb_ep *ep, struct usb_request *r)
{
}
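
/*
 * Map the optional SUDMAC register window described by the "sudmac"
 * memory resource of the platform device.
 */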
static int __init r8a66597_sudmac_ioremap(struct r8a66597 *r8a66597,
					  struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sudmac");
	if (!res) {
		dev_err(&pdev->dev, "platform_get_resource error(sudmac).\n");
		return -ENODEV;
	}

	r8a66597->sudmac_reg = ioremap(res->start, resource_size(res));
	if (r8a66597->sudmac_reg == NULL) {
		dev_err(&pdev->dev, "ioremap error(sudmac).\n");
		return -ENOMEM;
	}

	return 0;
}
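
/*
 * Probe: map the controller registers, allocate and initialise the
 * device state, grab the USB clock on on-chip variants, optionally map
 * SUDMAC, install the (shared) interrupt handler, set up ep0 and the
 * remaining pipes, preallocate an internal ep0 request and finally
 * register the UDC with the gadget core.
 */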
static int __init r8a66597_probe(struct platform_device *pdev)
{
#ifdef CONFIG_HAVE_CLK
	char clk_name[8];
#endif
	struct resource *res, *ires;
	int irq;
	void __iomem *reg = NULL;
	struct r8a66597 *r8a66597 = NULL;
	int ret = 0;
	int i;
	unsigned long irq_trigger;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_resource error.\n");
		goto clean_up;
	}

	ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!ires) {
		/* bail out before dereferencing a missing IRQ resource */
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_resource IORESOURCE_IRQ error.\n");
		goto clean_up;
	}
	irq = ires->start;
	irq_trigger = ires->flags & IRQF_TRIGGER_MASK;

	if (irq < 0) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "platform_get_irq error.\n");
		goto clean_up;
	}

	reg = ioremap(res->start, resource_size(res));
	if (reg == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "ioremap error.\n");
		goto clean_up;
	}

	/* initialize ucd */
	r8a66597 = kzalloc(sizeof(struct r8a66597), GFP_KERNEL);
	if (r8a66597 == NULL) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "kzalloc error\n");
		goto clean_up;
	}

	spin_lock_init(&r8a66597->lock);
	dev_set_drvdata(&pdev->dev, r8a66597);
	r8a66597->pdata = pdev->dev.platform_data;
	r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;

	r8a66597->gadget.ops = &r8a66597_gadget_ops;
	dev_set_name(&r8a66597->gadget.dev, "gadget");
	r8a66597->gadget.max_speed = USB_SPEED_HIGH;
	r8a66597->gadget.dev.parent = &pdev->dev;
	r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
	r8a66597->gadget.dev.release = pdev->dev.release;
	r8a66597->gadget.name = udc_name;
	ret = device_register(&r8a66597->gadget.dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "device_register failed\n");
		goto clean_up;
	}

	init_timer(&r8a66597->timer);
	r8a66597->timer.function = r8a66597_timer;
	r8a66597->timer.data = (unsigned long)r8a66597;
	r8a66597->reg = reg;

#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		snprintf(clk_name, sizeof(clk_name), "usb%d", pdev->id);
		r8a66597->clk = clk_get(&pdev->dev, clk_name);
		if (IS_ERR(r8a66597->clk)) {
			dev_err(&pdev->dev, "cannot get clock \"%s\"\n",
				clk_name);
			ret = PTR_ERR(r8a66597->clk);
			goto clean_up_dev;
		}
		clk_enable(r8a66597->clk);
	}
#endif
	if (r8a66597->pdata->sudmac) {
		ret = r8a66597_sudmac_ioremap(r8a66597, pdev);
		if (ret < 0)
			goto clean_up2;
	}

	disable_controller(r8a66597); /* make sure controller is disabled */

	ret = request_irq(irq, r8a66597_irq, IRQF_SHARED,
			udc_name, r8a66597);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq error (%d)\n", ret);
		goto clean_up2;
	}

	INIT_LIST_HEAD(&r8a66597->gadget.ep_list);
	r8a66597->gadget.ep0 = &r8a66597->ep[0].ep;
	INIT_LIST_HEAD(&r8a66597->gadget.ep0->ep_list);
	for (i = 0; i < R8A66597_MAX_NUM_PIPE; i++) {
		struct r8a66597_ep *ep = &r8a66597->ep[i];

		if (i != 0) {
			INIT_LIST_HEAD(&r8a66597->ep[i].ep.ep_list);
			list_add_tail(&r8a66597->ep[i].ep.ep_list,
					&r8a66597->gadget.ep_list);
		}
		ep->r8a66597 = r8a66597;
		INIT_LIST_HEAD(&ep->queue);
		ep->ep.name = r8a66597_ep_name[i];
		ep->ep.ops = &r8a66597_ep_ops;
		ep->ep.maxpacket = 512;
	}
	r8a66597->ep[0].ep.maxpacket = 64;
	r8a66597->ep[0].pipenum = 0;
	r8a66597->ep[0].fifoaddr = CFIFO;
	r8a66597->ep[0].fifosel = CFIFOSEL;
	r8a66597->ep[0].fifoctr = CFIFOCTR;
	r8a66597->ep[0].pipectr = get_pipectr_addr(0);
	r8a66597->pipenum2ep[0] = &r8a66597->ep[0];
	r8a66597->epaddr2ep[0] = &r8a66597->ep[0];

	r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
							GFP_KERNEL);
	if (r8a66597->ep0_req == NULL) {
		ret = -ENOMEM;
		goto clean_up3;
	}
	r8a66597->ep0_req->complete = nop_completion;

	ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
	if (ret)
		goto err_add_udc;

	dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION);
	return 0;

err_add_udc:
	r8a66597_free_request(&r8a66597->ep[0].ep, r8a66597->ep0_req);
clean_up3:
	free_irq(irq, r8a66597);
clean_up2:
#ifdef CONFIG_HAVE_CLK
	if (r8a66597->pdata->on_chip) {
		clk_disable(r8a66597->clk);
		clk_put(r8a66597->clk);
	}
clean_up_dev:
#endif
	device_unregister(&r8a66597->gadget.dev);
clean_up:
	if (r8a66597) {
		if (r8a66597->sudmac_reg)
			iounmap(r8a66597->sudmac_reg);
		if (r8a66597->ep0_req)
			r8a66597_free_request(&r8a66597->ep[0].ep,
						r8a66597->ep0_req);
		kfree(r8a66597);
	}
	if (reg)
		iounmap(reg);

	return ret;
}

/*-------------------------------------------------------------------------*/
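
/*
 * No .probe here: the driver is registered with platform_driver_probe(),
 * which allows r8a66597_probe() to stay in __init memory.
 */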
static struct platform_driver r8a66597_driver = {
	.remove = __exit_p(r8a66597_remove),
	.driver = {
		.name = (char *) udc_name,
	},
};

MODULE_ALIAS("platform:r8a66597_udc");

static int __init r8a66597_udc_init(void)
{
	return platform_driver_probe(&r8a66597_driver, r8a66597_probe);
}
module_init(r8a66597_udc_init);

static void __exit r8a66597_udc_cleanup(void)
{
	platform_driver_unregister(&r8a66597_driver);
}
module_exit(r8a66597_udc_cleanup);

MODULE_DESCRIPTION("R8A66597 USB gadget driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yoshihiro Shimoda");