u132-hcd.c 91 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262
  1. /*
  2. * Host Controller Driver for the Elan Digital Systems U132 adapter
  3. *
  4. * Copyright(C) 2006 Elan Digital Systems Limited
  5. * http://www.elandigitalsystems.com
  6. *
  7. * Author and Maintainer - Tony Olech - Elan Digital Systems
  8. * tony.olech@elandigitalsystems.com
  9. *
  10. * This program is free software;you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License as
  12. * published by the Free Software Foundation, version 2.
  13. *
  14. *
  15. * This driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
  16. * based on various USB host drivers in the 2.6.15 linux kernel
  17. * with constant reference to the 3rd Edition of Linux Device Drivers
  18. * published by O'Reilly
  19. *
  20. * The U132 adapter is a USB to CardBus adapter specifically designed
  21. * for PC cards that contain an OHCI host controller. Typical PC cards
  22. * are the Orange Mobile 3G Option GlobeTrotter Fusion card.
  23. *
  24. * The U132 adapter will *NOT *work with PC cards that do not contain
  25. * an OHCI controller. A simple way to test whether a PC card has an
  26. * OHCI controller as an interface is to insert the PC card directly
  27. * into a laptop(or desktop) with a CardBus slot and if "lspci" shows
  28. * a new USB controller and "lsusb -v" shows a new OHCI Host Controller
  29. * then there is a good chance that the U132 adapter will support the
  30. * PC card.(you also need the specific client driver for the PC card)
  31. *
  32. * Please inform the Author and Maintainer about any PC cards that
  33. * contain OHCI Host Controller and work when directly connected to
  34. * an embedded CardBus slot but do not work when they are connected
  35. * via an ELAN U132 adapter.
  36. *
  37. */
  38. #include <linux/kernel.h>
  39. #include <linux/module.h>
  40. #include <linux/moduleparam.h>
  41. #include <linux/delay.h>
  42. #include <linux/ioport.h>
  43. #include <linux/pci_ids.h>
  44. #include <linux/sched.h>
  45. #include <linux/slab.h>
  46. #include <linux/errno.h>
  47. #include <linux/init.h>
  48. #include <linux/timer.h>
  49. #include <linux/list.h>
  50. #include <linux/interrupt.h>
  51. #include <linux/usb.h>
  52. #include <linux/usb/hcd.h>
  53. #include <linux/workqueue.h>
  54. #include <linux/platform_device.h>
  55. #include <linux/mutex.h>
  56. #include <asm/io.h>
  57. #include <asm/irq.h>
  58. #include <asm/system.h>
  59. #include <asm/byteorder.h>
  60. /* FIXME ohci.h is ONLY for internal use by the OHCI driver.
  61. * If you're going to try stuff like this, you need to split
  62. * out shareable stuff (register declarations?) into its own
  63. * file, maybe name <linux/usb/ohci.h>
  64. */
  65. #include "ohci.h"
  66. #define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
  67. #define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
  68. OHCI_INTR_WDH)
  69. MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
  70. MODULE_DESCRIPTION("U132 USB Host Controller Driver");
  71. MODULE_LICENSE("GPL");
  72. #define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
  73. INT_MODULE_PARM(testing, 0);
  74. /* Some boards misreport power switching/overcurrent*/
  75. static int distrust_firmware = 1;
  76. module_param(distrust_firmware, bool, 0);
  77. MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
  78. "t setup");
  79. static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
  80. /*
  81. * u132_module_lock exists to protect access to global variables
  82. *
  83. */
  84. static struct mutex u132_module_lock;
  85. static int u132_exiting;
  86. static int u132_instances;
  87. static struct list_head u132_static_list;
  88. /*
  89. * end of the global variables protected by u132_module_lock
  90. */
  91. static struct workqueue_struct *workqueue;
#define MAX_U132_PORTS 7
#define MAX_U132_ADDRS 128
#define MAX_U132_UDEVS 4
#define MAX_U132_ENDPS 100
#define MAX_U132_RINGS 4
/*
 * Human-readable names for the 16 OHCI transfer condition codes,
 * indexed by the 4-bit condition code value.
 */
static const char *cc_to_text[16] = {
	"No Error ",
	"CRC Error ",
	"Bit Stuff ",
	"Data Togg ",
	"Stall ",
	"DevNotResp ",
	"PIDCheck ",
	"UnExpPID ",
	"DataOver ",
	"DataUnder ",
	"(for hw) ",
	"(for hw) ",
	"BufferOver ",
	"BuffUnder ",
	"(for HCD) ",
	"(for HCD) "
};
/* Per-port state cached for the virtual root hub. */
struct u132_port {
	struct u132 *u132;	/* owning adapter */
	int reset;
	int enable;
	int power;
	int Status;	/* NOTE(review): presumably wPortStatus/wPortChange
			 * style bits reported to the hub driver -- confirm
			 * against the hub_status/hub_control code */
};
/* Maps a USB bus address to the udev slot that owns it. */
struct u132_addr {
	u8 address;
};
/*
 * Per-device state.  Reference counted: an endpoint holds one udev
 * reference per direction it uses (see u132_endp_delete()).
 */
struct u132_udev {
	struct kref kref;
	struct usb_device *usb_device;
	u8 enumeration;
	u8 udev_number;
	u8 usb_addr;		/* USB bus address assigned to the device */
	u8 portnumber;
	u8 endp_number_in[16];	/* endp slot per IN endpoint number */
	u8 endp_number_out[16];	/* endp slot per OUT endpoint number */
};
/*
 * Each endpoint has a fixed 8-slot circular URB queue; URBs beyond
 * that spill into the endpoint's urb_more overflow list.
 */
#define ENDP_QUEUE_SHIFT 3
#define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT)
#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1)
/* Overflow entry linking an URB into u132_endp->urb_more. */
struct u132_urbq {
	struct list_head urb_more;
	struct urb *urb;
};
/* Simple wrapper around a spinlock. */
struct u132_spin {
	spinlock_t slock;
};
/*
 * Per-endpoint state: a circular URB queue (urb_list, indices masked
 * with ENDP_QUEUE_MASK) with overflow kept in urb_more, plus the
 * delayed work that feeds the endpoint to its transfer ring.
 */
struct u132_endp {
	struct kref kref;
	u8 udev_number;
	u8 endp_number;		/* 1-based slot in u132->endp[]
				 * (see u132_endp_delete()) */
	u8 usb_addr;
	u8 usb_endp;
	struct u132 *u132;
	struct list_head endp_ring;	/* links endpoints sharing a ring */
	struct u132_ring *ring;
	unsigned toggle_bits:2;	/* passed to the edset transfer calls */
	unsigned active:1;
	unsigned delayed:1;
	unsigned input:1;	/* holds an IN-direction udev reference */
	unsigned output:1;	/* holds an OUT-direction udev reference */
	unsigned pipetype:2;
	unsigned dequeueing:1;
	unsigned edset_flush:1;
	unsigned spare_bits:14;
	unsigned long jiffies;
	struct usb_host_endpoint *hep;
	struct u132_spin queue_lock;	/* protects the URB queue below */
	u16 queue_size;
	u16 queue_last;		/* producer index (masked with ENDP_QUEUE_MASK) */
	u16 queue_next;		/* consumer index (masked with ENDP_QUEUE_MASK) */
	struct urb *urb_list[ENDP_QUEUE_SIZE];
	struct list_head urb_more;	/* overflow beyond ENDP_QUEUE_SIZE */
	struct delayed_work scheduler;
};
/*
 * One of the MAX_U132_RINGS transfer rings.  A ring services a set of
 * endpoints linked through u132_endp.endp_ring; curr_endp is the one
 * currently being serviced.
 */
struct u132_ring {
	unsigned in_use:1;
	unsigned length:7;	/* number of endpoints attached to the ring */
	u8 number;		/* ring index passed to the ftdi edset calls */
	struct u132 *u132;
	struct u132_endp *curr_endp;
	struct delayed_work scheduler;
};
/*
 * Top-level state for one U132 adapter.  Embedded inside the hcd_priv
 * area of a usb_hcd (see hcd_to_u132()/u132_to_hcd()).
 */
struct u132 {
	struct kref kref;
	struct list_head u132_list;	/* membership in u132_static_list,
					 * protected by u132_module_lock */
	struct mutex sw_lock;
	struct mutex scheduler_lock;
	struct u132_platform_data *board;
	struct platform_device *platform_dev;	/* FTDI transport device */
	struct u132_ring ring[MAX_U132_RINGS];
	int sequence_num;
	int going;	/* 0 = running, 1 = being removed, >1 = removed
			 * (see u132_hcd_monitor_work()) */
	int power;
	int reset;
	int num_ports;
	u32 hc_control;		/* shadow copies of the OHCI registers, */
	u32 hc_fminterval;	/* refreshed by read_roothub_info() */
	u32 hc_roothub_status;
	u32 hc_roothub_a;
	u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
	int flags;
	unsigned long next_statechange;
	struct delayed_work monitor;	/* periodic root-hub poll */
	int num_endpoints;
	struct u132_addr addr[MAX_U132_ADDRS];
	struct u132_udev udev[MAX_U132_UDEVS];
	struct u132_port port[MAX_U132_PORTS];
	struct u132_endp *endp[MAX_U132_ENDPS];
};
  208. /*
  209. * these cannot be inlines because we need the structure offset!!
  210. * Does anyone have a better way?????
  211. */
  212. #define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
  213. offsetof(struct ohci_regs, member), 0, data);
  214. #define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
  215. offsetof(struct ohci_regs, member), 0, data);
  216. #define u132_read_pcimem(u132, member, data) \
  217. usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
  218. ohci_regs, member), 0, data);
  219. #define u132_write_pcimem(u132, member, data) \
  220. usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
  221. ohci_regs, member), 0, data);
/*
 * udev slots are embedded arrays inside struct u132, so the owning
 * adapter can be recovered with container_of() using the slot index.
 */
static inline struct u132 *udev_to_u132(struct u132_udev *udev)
{
	u8 udev_number = udev->udev_number;
	return container_of(udev, struct u132, udev[udev_number]);
}
/* struct u132 lives in the hcd_priv area of the usb_hcd. */
static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
{
	return (struct u132 *)(hcd->hcd_priv);
}
/* Inverse of hcd_to_u132(); the void * cast sidesteps the type of the
 * hcd_priv member for container_of(). */
static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
{
	return container_of((void *)u132, struct usb_hcd, hcd_priv);
}
/* Mark the (virtual) host controller halted. */
static inline void u132_disable(struct u132 *u132)
{
	u132_to_hcd(u132)->state = HC_STATE_HALT;
}
/* Recover the containing object from an embedded struct kref. */
#define kref_to_u132(d) container_of(d, struct u132, kref)
#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
#include "../misc/usb_u132.h"
static const char hcd_name[] = "u132_hcd";
/* Port change bits (shifted into the upper 16 bits of the port status
 * word) that indicate a change worth reporting to the hub driver. */
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
	USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
	USB_PORT_STAT_C_RESET) << 16)
/*
 * kref release for the adapter: unlink it from the global instance
 * list, then drop the embedding usb_hcd.  Since struct u132 lives in
 * hcd->hcd_priv, usb_put_hcd() is what actually frees the memory.
 */
static void u132_hcd_delete(struct kref *kref)
{
	struct u132 *u132 = kref_to_u132(kref);
	struct platform_device *pdev = u132->platform_dev;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	u132->going += 1;	/* >1: tells any remaining work "removed" */
	mutex_lock(&u132_module_lock);
	list_del_init(&u132->u132_list);
	u132_instances -= 1;
	mutex_unlock(&u132_module_lock);
	dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
		"2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev);
	usb_put_hcd(hcd);
}
/* Drop/initialize the adapter-wide reference count. */
static inline void u132_u132_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}
static inline void u132_u132_init_kref(struct u132 *u132)
{
	kref_init(&u132->kref);
}
  269. static void u132_udev_delete(struct kref *kref)
  270. {
  271. struct u132_udev *udev = kref_to_u132_udev(kref);
  272. udev->udev_number = 0;
  273. udev->usb_device = NULL;
  274. udev->usb_addr = 0;
  275. udev->enumeration = 0;
  276. }
/* Reference-count helpers for a device slot; the u132 argument is
 * unused but kept for symmetry with the other kref helpers. */
static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_put(&udev->kref, u132_udev_delete);
}
static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_get(&udev->kref);
}
static inline void u132_udev_init_kref(struct u132 *u132,
	struct u132_udev *udev)
{
	kref_init(&udev->kref);
}
/* NOTE(review): despite the name this drops the *adapter's* kref (the
 * reference held on behalf of the ring's pending work, taken in
 * u132_ring_queue_work()) -- rings are not separately refcounted. */
static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
{
	kref_put(&u132->kref, u132_hcd_delete);
}
  294. static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
  295. unsigned int delta)
  296. {
  297. if (delta > 0) {
  298. if (queue_delayed_work(workqueue, &ring->scheduler, delta))
  299. return;
  300. } else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
  301. return;
  302. kref_put(&u132->kref, u132_hcd_delete);
  303. }
/* Take an adapter reference for the work about to be queued; the
 * requeue helper drops it again if queueing fails. */
static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
	unsigned int delta)
{
	kref_get(&u132->kref);
	u132_ring_requeue_work(u132, ring, delta);
}
/* Cancel pending ring work; if it was actually queued, drop the
 * adapter reference it was holding. */
static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
{
	if (cancel_delayed_work(&ring->scheduler))
		kref_put(&u132->kref, u132_hcd_delete);
}
/*
 * kref release for an endpoint: detach it from its transfer ring,
 * drop the per-direction udev references it held, clear its slot in
 * u132->endp[] and the host-endpoint private pointer, free it, and
 * release the adapter reference taken in u132_endp_init_kref().
 */
static void u132_endp_delete(struct kref *kref)
{
	struct u132_endp *endp = kref_to_u132_endp(kref);
	struct u132 *u132 = endp->u132;
	u8 usb_addr = endp->usb_addr;
	u8 usb_endp = endp->usb_endp;
	u8 address = u132->addr[usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	u8 endp_number = endp->endp_number;
	struct usb_host_endpoint *hep = endp->hep;
	struct u132_ring *ring = endp->ring;
	struct list_head *head = &endp->endp_ring;
	ring->length -= 1;
	if (endp == ring->curr_endp) {
		if (list_empty(head)) {
			/* last endpoint on the ring */
			ring->curr_endp = NULL;
			list_del(head);
		} else {
			/* advance the ring to the next endpoint */
			struct u132_endp *next_endp = list_entry(head->next,
				struct u132_endp, endp_ring);
			ring->curr_endp = next_endp;
			list_del(head);
		}
	} else
		list_del(head);
	if (endp->input) {
		udev->endp_number_in[usb_endp] = 0;
		u132_udev_put_kref(u132, udev);
	}
	if (endp->output) {
		udev->endp_number_out[usb_endp] = 0;
		u132_udev_put_kref(u132, udev);
	}
	u132->endp[endp_number - 1] = NULL;	/* endp_number is 1-based */
	hep->hcpriv = NULL;
	kfree(endp);
	u132_u132_put_kref(u132);
}
/* Reference-count helpers for an endpoint. */
static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_put(&endp->kref, u132_endp_delete);
}
static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_get(&endp->kref);
}
/* First reference: also pin the owning adapter until the endpoint
 * dies (released at the end of u132_endp_delete()). */
static inline void u132_endp_init_kref(struct u132 *u132,
	struct u132_endp *endp)
{
	kref_init(&endp->kref);
	kref_get(&u132->kref);
}
/* Schedule the endpoint's work; a successfully queued work item holds
 * one endpoint reference, dropped by the cancel helper below or by the
 * work itself when it completes. */
static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
	unsigned int delta)
{
	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
		kref_get(&endp->kref);
}
static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
{
	if (cancel_delayed_work(&endp->scheduler))
		kref_put(&endp->kref, u132_endp_delete);
}
/* Drop the adapter reference held by pending monitor work. */
static inline void u132_monitor_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}
/* Schedule the monitor; take an adapter reference on success. */
static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
	if (queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_get(&u132->kref);
}
/* Re-schedule from within the monitor itself: the running work already
 * holds a reference, so only drop it when re-queueing failed. */
static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_put(&u132->kref, u132_hcd_delete);
}
/* Cancel pending monitor work; drop its reference if it was queued. */
static void u132_monitor_cancel_work(struct u132 *u132)
{
	if (cancel_delayed_work(&u132->monitor))
		kref_put(&u132->kref, u132_hcd_delete);
}
  397. static int read_roothub_info(struct u132 *u132)
  398. {
  399. u32 revision;
  400. int retval;
  401. retval = u132_read_pcimem(u132, revision, &revision);
  402. if (retval) {
  403. dev_err(&u132->platform_dev->dev, "error %d accessing device co"
  404. "ntrol\n", retval);
  405. return retval;
  406. } else if ((revision & 0xFF) == 0x10) {
  407. } else if ((revision & 0xFF) == 0x11) {
  408. } else {
  409. dev_err(&u132->platform_dev->dev, "device revision is not valid"
  410. " %08X\n", revision);
  411. return -ENODEV;
  412. }
  413. retval = u132_read_pcimem(u132, control, &u132->hc_control);
  414. if (retval) {
  415. dev_err(&u132->platform_dev->dev, "error %d accessing device co"
  416. "ntrol\n", retval);
  417. return retval;
  418. }
  419. retval = u132_read_pcimem(u132, roothub.status,
  420. &u132->hc_roothub_status);
  421. if (retval) {
  422. dev_err(&u132->platform_dev->dev, "error %d accessing device re"
  423. "g roothub.status\n", retval);
  424. return retval;
  425. }
  426. retval = u132_read_pcimem(u132, roothub.a, &u132->hc_roothub_a);
  427. if (retval) {
  428. dev_err(&u132->platform_dev->dev, "error %d accessing device re"
  429. "g roothub.a\n", retval);
  430. return retval;
  431. }
  432. {
  433. int I = u132->num_ports;
  434. int i = 0;
  435. while (I-- > 0) {
  436. retval = u132_read_pcimem(u132, roothub.portstatus[i],
  437. &u132->hc_roothub_portstatus[i]);
  438. if (retval) {
  439. dev_err(&u132->platform_dev->dev, "error %d acc"
  440. "essing device roothub.portstatus[%d]\n"
  441. , retval, i);
  442. return retval;
  443. } else
  444. i += 1;
  445. }
  446. }
  447. return 0;
  448. }
/*
 * Periodic health check: re-read the root-hub registers every 500ms.
 * Bails out (dropping the work's adapter reference) once the device is
 * being removed; a failed register read halts the HC, marks it dead
 * and tells the ftdi transport the hardware is gone.
 */
static void u132_hcd_monitor_work(struct work_struct *work)
{
	struct u132 *u132 = container_of(work, struct u132, monitor.work);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		u132_monitor_put_kref(u132);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		u132_monitor_put_kref(u132);
		return;
	} else {
		int retval;
		mutex_lock(&u132->sw_lock);
		retval = read_roothub_info(u132);
		if (retval) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			u132_disable(u132);
			u132->going = 1;
			mutex_unlock(&u132->sw_lock);
			usb_hc_died(hcd);
			ftdi_elan_gone_away(u132->platform_dev);
			u132_monitor_put_kref(u132);
			return;
		} else {
			/* healthy: poll again in 500ms */
			u132_monitor_requeue_work(u132, 500);
			mutex_unlock(&u132->sw_lock);
			return;
		}
	}
}
/*
 * Complete an URB back to usbcore: unlink it from the endpoint's
 * circular queue (refilling the freed slot from the urb_more overflow
 * list when the queue was full), kick the ring scheduler, drop the
 * endpoint reference associated with the URB, and hand the URB to
 * usb_hcd_giveback_urb() with the given status.
 */
static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	struct u132_ring *ring;
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		/* queue was not full: just mark the endpoint idle */
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		/* queue was full: promote one overflow URB into the slot
		 * just vacated */
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	mutex_lock(&u132->scheduler_lock);
	ring = endp->ring;
	ring->in_use = 0;
	/* restart the ring immediately so the next queued URB runs */
	u132_ring_cancel_work(u132, ring);
	u132_ring_queue_work(u132, ring, 0);
	mutex_unlock(&u132->scheduler_lock);
	u132_endp_put_kref(u132, endp);
	usb_hcd_giveback_urb(hcd, urb, status);
}
/* Drop the endpoint reference for an URB that is abandoned without
 * completion (used on the device-removed paths); urb and status are
 * intentionally unused -- the signature mirrors the other *_urb
 * completion helpers. */
static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	u132_endp_put_kref(u132, endp);
}
/*
 * Like u132_hcd_giveback_urb() -- identical queue manipulation -- but
 * deliberately skips the ring-scheduler kick and the endpoint kref
 * drop.  NOTE(review): the dequeue logic is duplicated from
 * u132_hcd_giveback_urb(); a shared helper would keep them in sync.
 */
static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);
	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		/* queue was full: refill the freed slot from urb_more */
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		endp->active = 0;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	usb_hcd_giveback_urb(hcd, urb, status);
}
  544. static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
  545. struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
  546. void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
  547. int toggle_bits, int error_count, int condition_code, int repeat_number,
  548. int halted, int skipped, int actual, int non_null))
  549. {
  550. return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
  551. urb, address, endp->usb_endp, toggle_bits, callback);
  552. }
/*
 * Convenience wrapper around usb_ftdi_elan_edset_setup(): supplies the
 * platform device and the endpoint number from driver state.
 */
static inline int edset_setup(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}
/*
 * Convenience wrapper around usb_ftdi_elan_edset_single(): supplies the
 * platform device and the endpoint number from driver state.
 */
static inline int edset_single(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}
/*
 * Convenience wrapper around usb_ftdi_elan_edset_output(): supplies the
 * platform device and the endpoint number from driver state.
 */
static inline int edset_output(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}
/*
 * The completion callbacks below must not acquire sw_lock.
 *
 */
/*
 * u132_hcd_interrupt_recv - completion callback for an INTERRUPT IN chunk.
 *
 * Copies the received bytes into the urb's transfer buffer, then either
 * re-issues the transfer, parks the endpoint until the next polling
 * interval, or completes the urb.  Every completion path releases
 * scheduler_lock before the urb is given back.
 */
static void u132_hcd_interrupt_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while the transfer was in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		/* removal in progress - complete with -ENODEV */
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer + urb->actual_length;
		u8 *b = buf;
		int L = len;
		/* append the received bytes to the transfer buffer */
		while (L-- > 0)
			*u++ = *b++;
		urb->actual_length += len;
		if ((condition_code == TD_CC_NOERROR) &&
			(urb->transfer_buffer_length > urb->actual_length)) {
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			if (urb->actual_length > 0) {
				/* data is flowing - poll again immediately */
				int retval;
				mutex_unlock(&u132->scheduler_lock);
				retval = edset_single(u132, ring, endp, urb,
					address, endp->toggle_bits,
					u132_hcd_interrupt_recv);
				if (retval != 0)
					u132_hcd_giveback_urb(u132, endp, urb,
						retval);
			} else {
				/*
				 * nothing received yet: free the ring, delay
				 * this endpoint by the urb's polling interval
				 * and let the ring scheduler pick it up again
				 */
				ring->in_use = 0;
				endp->active = 0;
				endp->jiffies = jiffies +
					msecs_to_jiffies(urb->interval);
				u132_ring_cancel_work(u132, ring);
				u132_ring_queue_work(u132, ring, 0);
				mutex_unlock(&u132->scheduler_lock);
				u132_endp_put_kref(u132, endp);
			}
			return;
		} else if ((condition_code == TD_DATAUNDERRUN) &&
			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
			/* short packet is acceptable - complete successfully */
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else {
			/* buffer full or transfer error - complete the urb */
			if (condition_code == TD_CC_NOERROR) {
				endp->toggle_bits = toggle_bits;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 1 & toggle_bits);
			} else if (condition_code == TD_CC_STALL) {
				/*
				 * NOTE(review): 0x2 appears to reset the
				 * stored toggle state after a halt - confirm
				 */
				endp->toggle_bits = 0x2;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 0);
			} else {
				endp->toggle_bits = 0x2;
				usb_settoggle(udev->usb_device, endp->usb_endp,
					0, 0);
				dev_err(&u132->platform_dev->dev, "urb=%p givin"
					"g back INTERRUPT %s\n", urb,
					cc_to_text[condition_code]);
			}
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		/* urb unlinked but callback still fired - just complete it */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * u132_hcd_bulk_output_sent - completion callback for a BULK OUT chunk.
 *
 * Adds the bytes just transmitted to urb->actual_length, saves the data
 * toggle, and either schedules the next chunk with edset_output() or
 * completes the urb once the whole buffer has been sent.
 */
static void u132_hcd_bulk_output_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		urb->actual_length += len;
		endp->toggle_bits = toggle_bits;
		if (urb->transfer_buffer_length > urb->actual_length) {
			/* more data to send - queue the next chunk */
			int retval;
			mutex_unlock(&u132->scheduler_lock);
			retval = edset_output(u132, ring, endp, urb, address,
				endp->toggle_bits, u132_hcd_bulk_output_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else {
			/* whole buffer transmitted - complete successfully */
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		}
	} else {
		/* urb unlinked but callback still fired - complete with 0 */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * u132_hcd_bulk_input_recv - completion callback for a BULK IN chunk.
 *
 * Appends the received bytes to the urb's transfer buffer, then either
 * resubmits the endpoint for more data or completes the urb with a
 * status derived from the transfer's condition code.
 */
static void u132_hcd_bulk_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer + urb->actual_length;
		u8 *b = buf;
		int L = len;
		/* append the received bytes to the transfer buffer */
		while (L-- > 0)
			*u++ = *b++;
		urb->actual_length += len;
		if ((condition_code == TD_CC_NOERROR) &&
			(urb->transfer_buffer_length > urb->actual_length)) {
			/* buffer not yet full - request the next chunk */
			int retval;
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_input(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, endp->toggle_bits,
				u132_hcd_bulk_input_recv);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (condition_code == TD_CC_NOERROR) {
			/* buffer filled completely - complete the urb */
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else if ((condition_code == TD_DATAUNDERRUN) &&
			((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)) {
			/* short read is acceptable - complete successfully */
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else if (condition_code == TD_DATAUNDERRUN) {
			/*
			 * NOTE(review): URB_SHORT_NOT_OK is set, yet the urb
			 * is still completed with status 0 after the warning;
			 * confirm this is intended.
			 */
			endp->toggle_bits = toggle_bits;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0,
				1 & toggle_bits);
			dev_warn(&u132->platform_dev->dev, "urb=%p(SHORT NOT OK"
				") giving back BULK IN %s\n", urb,
				cc_to_text[condition_code]);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb, 0);
			return;
		} else if (condition_code == TD_CC_STALL) {
			/* halted endpoint - reset the stored toggle state */
			endp->toggle_bits = 0x2;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else {
			/* any other error - report and complete */
			endp->toggle_bits = 0x2;
			usb_settoggle(udev->usb_device, endp->usb_endp, 0, 0);
			dev_err(&u132->platform_dev->dev, "urb=%p giving back B"
				"ULK IN code=%d %s\n", urb, condition_code,
				cc_to_text[condition_code]);
			mutex_unlock(&u132->scheduler_lock);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		/* urb unlinked but callback still fired - complete with 0 */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
  831. static void u132_hcd_configure_empty_sent(void *data, struct urb *urb, u8 *buf,
  832. int len, int toggle_bits, int error_count, int condition_code,
  833. int repeat_number, int halted, int skipped, int actual, int non_null)
  834. {
  835. struct u132_endp *endp = data;
  836. struct u132 *u132 = endp->u132;
  837. mutex_lock(&u132->scheduler_lock);
  838. if (u132->going > 1) {
  839. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  840. , u132->going);
  841. mutex_unlock(&u132->scheduler_lock);
  842. u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
  843. return;
  844. } else if (endp->dequeueing) {
  845. endp->dequeueing = 0;
  846. mutex_unlock(&u132->scheduler_lock);
  847. u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
  848. return;
  849. } else if (u132->going > 0) {
  850. dev_err(&u132->platform_dev->dev, "device is being removed "
  851. "urb=%p\n", urb);
  852. mutex_unlock(&u132->scheduler_lock);
  853. u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
  854. return;
  855. } else if (!urb->unlinked) {
  856. mutex_unlock(&u132->scheduler_lock);
  857. u132_hcd_giveback_urb(u132, endp, urb, 0);
  858. return;
  859. } else {
  860. dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
  861. "unlinked=%d\n", urb, urb->unlinked);
  862. mutex_unlock(&u132->scheduler_lock);
  863. u132_hcd_giveback_urb(u132, endp, urb, 0);
  864. return;
  865. }
  866. }
/*
 * u132_hcd_configure_input_recv - completion callback for the data
 * stage of a control IN transfer.
 *
 * Copies the received data into the transfer buffer (actual_length is
 * assigned, not accumulated - the data stage arrives as one chunk) and,
 * when the data stage succeeded or was an acceptable short read,
 * schedules the zero-length status stage via
 * usb_ftdi_elan_edset_empty().
 */
static void u132_hcd_configure_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer;
		u8 *b = buf;
		int L = len;
		/* copy the data stage into the transfer buffer */
		while (L-- > 0)
			*u++ = *b++;
		urb->actual_length = len;
		if ((condition_code == TD_CC_NOERROR) || ((condition_code ==
			TD_DATAUNDERRUN) && ((urb->transfer_flags &
			URB_SHORT_NOT_OK) == 0))) {
			/* data stage done - send the zero-length status */
			int retval;
			mutex_unlock(&u132->scheduler_lock);
			retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
				ring->number, endp, urb, address,
				endp->usb_endp, 0x3,
				u132_hcd_configure_empty_sent);
			if (retval != 0)
				u132_hcd_giveback_urb(u132, endp, urb, retval);
			return;
		} else if (condition_code == TD_CC_STALL) {
			/* endpoint stalled the request */
			mutex_unlock(&u132->scheduler_lock);
			dev_warn(&u132->platform_dev->dev, "giving back SETUP I"
				"NPUT STALL urb %p\n", urb);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		} else {
			/* any other error - report and complete */
			mutex_unlock(&u132->scheduler_lock);
			dev_err(&u132->platform_dev->dev, "giving back SETUP IN"
				"PUT %s urb %p\n", cc_to_text[condition_code],
				urb);
			u132_hcd_giveback_urb(u132, endp, urb,
				cc_to_error[condition_code]);
			return;
		}
	} else {
		/* urb unlinked but callback still fired - complete with 0 */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
  936. static void u132_hcd_configure_empty_recv(void *data, struct urb *urb, u8 *buf,
  937. int len, int toggle_bits, int error_count, int condition_code,
  938. int repeat_number, int halted, int skipped, int actual, int non_null)
  939. {
  940. struct u132_endp *endp = data;
  941. struct u132 *u132 = endp->u132;
  942. mutex_lock(&u132->scheduler_lock);
  943. if (u132->going > 1) {
  944. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  945. , u132->going);
  946. mutex_unlock(&u132->scheduler_lock);
  947. u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
  948. return;
  949. } else if (endp->dequeueing) {
  950. endp->dequeueing = 0;
  951. mutex_unlock(&u132->scheduler_lock);
  952. u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
  953. return;
  954. } else if (u132->going > 0) {
  955. dev_err(&u132->platform_dev->dev, "device is being removed "
  956. "urb=%p\n", urb);
  957. mutex_unlock(&u132->scheduler_lock);
  958. u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
  959. return;
  960. } else if (!urb->unlinked) {
  961. mutex_unlock(&u132->scheduler_lock);
  962. u132_hcd_giveback_urb(u132, endp, urb, 0);
  963. return;
  964. } else {
  965. dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
  966. "unlinked=%d\n", urb, urb->unlinked);
  967. mutex_unlock(&u132->scheduler_lock);
  968. u132_hcd_giveback_urb(u132, endp, urb, 0);
  969. return;
  970. }
  971. }
  972. static void u132_hcd_configure_setup_sent(void *data, struct urb *urb, u8 *buf,
  973. int len, int toggle_bits, int error_count, int condition_code,
  974. int repeat_number, int halted, int skipped, int actual, int non_null)
  975. {
  976. struct u132_endp *endp = data;
  977. struct u132 *u132 = endp->u132;
  978. u8 address = u132->addr[endp->usb_addr].address;
  979. mutex_lock(&u132->scheduler_lock);
  980. if (u132->going > 1) {
  981. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  982. , u132->going);
  983. mutex_unlock(&u132->scheduler_lock);
  984. u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
  985. return;
  986. } else if (endp->dequeueing) {
  987. endp->dequeueing = 0;
  988. mutex_unlock(&u132->scheduler_lock);
  989. u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
  990. return;
  991. } else if (u132->going > 0) {
  992. dev_err(&u132->platform_dev->dev, "device is being removed "
  993. "urb=%p\n", urb);
  994. mutex_unlock(&u132->scheduler_lock);
  995. u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
  996. return;
  997. } else if (!urb->unlinked) {
  998. if (usb_pipein(urb->pipe)) {
  999. int retval;
  1000. struct u132_ring *ring = endp->ring;
  1001. mutex_unlock(&u132->scheduler_lock);
  1002. retval = usb_ftdi_elan_edset_input(u132->platform_dev,
  1003. ring->number, endp, urb, address,
  1004. endp->usb_endp, 0,
  1005. u132_hcd_configure_input_recv);
  1006. if (retval != 0)
  1007. u132_hcd_giveback_urb(u132, endp, urb, retval);
  1008. return;
  1009. } else {
  1010. int retval;
  1011. struct u132_ring *ring = endp->ring;
  1012. mutex_unlock(&u132->scheduler_lock);
  1013. retval = usb_ftdi_elan_edset_input(u132->platform_dev,
  1014. ring->number, endp, urb, address,
  1015. endp->usb_endp, 0,
  1016. u132_hcd_configure_empty_recv);
  1017. if (retval != 0)
  1018. u132_hcd_giveback_urb(u132, endp, urb, retval);
  1019. return;
  1020. }
  1021. } else {
  1022. dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
  1023. "unlinked=%d\n", urb, urb->unlinked);
  1024. mutex_unlock(&u132->scheduler_lock);
  1025. u132_hcd_giveback_urb(u132, endp, urb, 0);
  1026. return;
  1027. }
  1028. }
/*
 * u132_hcd_enumeration_empty_recv - completion callback for the status
 * stage of the enumeration (address-assignment) control transfer.
 *
 * On success the device is expected to respond on its newly assigned
 * address from now on: the default-address slot addr[0] is cleared and
 * the endpoint is retargeted at udev->usb_addr.
 */
static void u132_hcd_enumeration_empty_recv(void *data, struct urb *urb,
	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		/* switch from the default address to the assigned one */
		u132->addr[0].address = 0;
		endp->usb_addr = udev->usb_addr;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	} else {
		/* urb unlinked but callback still fired - complete with 0 */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * u132_hcd_enumeration_address_sent - completion callback for the SETUP
 * stage of the enumeration control transfer, sent to device address 0.
 *
 * Follows up with a zero-length IN (the status stage), still addressed
 * to the default address 0, completed by
 * u132_hcd_enumeration_empty_recv().
 */
static void u132_hcd_enumeration_address_sent(void *data, struct urb *urb,
	u8 *buf, int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		mutex_unlock(&u132->scheduler_lock);
		/* status stage: zero-length IN at the default address */
		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
			ring->number, endp, urb, 0, endp->usb_endp, 0,
			u132_hcd_enumeration_empty_recv);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		/* urb unlinked but callback still fired - complete with 0 */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
  1111. static void u132_hcd_initial_empty_sent(void *data, struct urb *urb, u8 *buf,
  1112. int len, int toggle_bits, int error_count, int condition_code,
  1113. int repeat_number, int halted, int skipped, int actual, int non_null)
  1114. {
  1115. struct u132_endp *endp = data;
  1116. struct u132 *u132 = endp->u132;
  1117. mutex_lock(&u132->scheduler_lock);
  1118. if (u132->going > 1) {
  1119. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  1120. , u132->going);
  1121. mutex_unlock(&u132->scheduler_lock);
  1122. u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
  1123. return;
  1124. } else if (endp->dequeueing) {
  1125. endp->dequeueing = 0;
  1126. mutex_unlock(&u132->scheduler_lock);
  1127. u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
  1128. return;
  1129. } else if (u132->going > 0) {
  1130. dev_err(&u132->platform_dev->dev, "device is being removed "
  1131. "urb=%p\n", urb);
  1132. mutex_unlock(&u132->scheduler_lock);
  1133. u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
  1134. return;
  1135. } else if (!urb->unlinked) {
  1136. mutex_unlock(&u132->scheduler_lock);
  1137. u132_hcd_giveback_urb(u132, endp, urb, 0);
  1138. return;
  1139. } else {
  1140. dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
  1141. "unlinked=%d\n", urb, urb->unlinked);
  1142. mutex_unlock(&u132->scheduler_lock);
  1143. u132_hcd_giveback_urb(u132, endp, urb, 0);
  1144. return;
  1145. }
  1146. }
/*
 * u132_hcd_initial_input_recv - completion callback for the data stage
 * of the initial control IN transfer.
 *
 * Stores the received bytes (a single chunk - actual_length is
 * assigned, not accumulated) and schedules the concluding zero-length
 * status stage via usb_ftdi_elan_edset_empty() with toggle 0x3.
 * NOTE(review): unlike u132_hcd_configure_input_recv(), the condition
 * code is not checked here - confirm that is intended.
 */
static void u132_hcd_initial_input_recv(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		u8 *u = urb->transfer_buffer;
		u8 *b = buf;
		int L = len;
		/* copy the data stage into the transfer buffer */
		while (L-- > 0)
			*u++ = *b++;
		urb->actual_length = len;
		mutex_unlock(&u132->scheduler_lock);
		/* conclude with the zero-length status stage */
		retval = usb_ftdi_elan_edset_empty(u132->platform_dev,
			ring->number, endp, urb, address, endp->usb_endp, 0x3,
			u132_hcd_initial_empty_sent);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		/* urb unlinked but callback still fired - complete with 0 */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * u132_hcd_initial_setup_sent - completion callback for the SETUP stage
 * of the initial control transfer.
 *
 * Follows up by requesting the IN data stage (toggle 0), which is
 * completed by u132_hcd_initial_input_recv().
 */
static void u132_hcd_initial_setup_sent(void *data, struct urb *urb, u8 *buf,
	int len, int toggle_bits, int error_count, int condition_code,
	int repeat_number, int halted, int skipped, int actual, int non_null)
{
	struct u132_endp *endp = data;
	struct u132 *u132 = endp->u132;
	u8 address = u132->addr[endp->usb_addr].address;
	mutex_lock(&u132->scheduler_lock);
	if (u132->going > 1) {
		/* controller already removed - just drop the urb */
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_forget_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (endp->dequeueing) {
		/* urb was cancelled while in flight */
		endp->dequeueing = 0;
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -EINTR);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, -ENODEV);
		return;
	} else if (!urb->unlinked) {
		int retval;
		struct u132_ring *ring = endp->ring;
		mutex_unlock(&u132->scheduler_lock);
		/* request the IN data stage of the control transfer */
		retval = usb_ftdi_elan_edset_input(u132->platform_dev,
			ring->number, endp, urb, address, endp->usb_endp, 0,
			u132_hcd_initial_input_recv);
		if (retval != 0)
			u132_hcd_giveback_urb(u132, endp, urb, retval);
		return;
	} else {
		/* urb unlinked but callback still fired - complete with 0 */
		dev_err(&u132->platform_dev->dev, "CALLBACK called urb=%p "
			"unlinked=%d\n", urb, urb->unlinked);
		mutex_unlock(&u132->scheduler_lock);
		u132_hcd_giveback_urb(u132, endp, urb, 0);
		return;
	}
}
/*
 * This work function is only ever executed from the work queue.
 *
 */
/*
 * u132_hcd_ring_work_scheduler - work item servicing one transfer ring.
 *
 * When the ring is idle, scan the circular list of endpoints starting
 * at ring->curr_endp for one that has urbs queued and is ready to run
 * (not delayed, or its delay has expired) and kick the endpoint work
 * item.  If only delayed endpoints remain, re-arm this work item with
 * the computed wakeup delay.  Every path that does not re-arm the work
 * drops the reference taken on the ring when the work was queued.
 */
static void u132_hcd_ring_work_scheduler(struct work_struct *work)
{
	struct u132_ring *ring =
		container_of(work, struct u132_ring, scheduler.work);
	struct u132 *u132 = ring->u132;
	mutex_lock(&u132->scheduler_lock);
	if (ring->in_use) {
		/* a transfer is already running on this ring */
		mutex_unlock(&u132->scheduler_lock);
		u132_ring_put_kref(u132, ring);
		return;
	} else if (ring->curr_endp) {
		struct u132_endp *last_endp = ring->curr_endp;
		struct list_head *scan;
		struct list_head *head = &last_endp->endp_ring;
		unsigned long wakeup = 0;
		list_for_each(scan, head) {
			struct u132_endp *endp = list_entry(scan,
				struct u132_endp, endp_ring);
			if (endp->queue_next == endp->queue_last) {
				/* nothing queued here - keep scanning */
			} else if ((endp->delayed == 0)
				|| time_after_eq(jiffies, endp->jiffies)) {
				/*
				 * ready to run.  NOTE(review): the work is
				 * queued on last_endp although curr_endp was
				 * just advanced to endp - confirm this is
				 * intended.
				 */
				ring->curr_endp = endp;
				u132_endp_cancel_work(u132, last_endp);
				u132_endp_queue_work(u132, last_endp, 0);
				mutex_unlock(&u132->scheduler_lock);
				u132_ring_put_kref(u132, ring);
				return;
			} else {
				/*
				 * still delayed.  NOTE(review): this keeps the
				 * LARGEST remaining delay; waking at the
				 * smallest would service the earliest
				 * deadline sooner - confirm intent.
				 */
				unsigned long delta = endp->jiffies - jiffies;
				if (delta > wakeup)
					wakeup = delta;
			}
		}
		/* last_endp itself is not visited by the loop above */
		if (last_endp->queue_next == last_endp->queue_last) {
		} else if ((last_endp->delayed == 0) || time_after_eq(jiffies,
			last_endp->jiffies)) {
			u132_endp_cancel_work(u132, last_endp);
			u132_endp_queue_work(u132, last_endp, 0);
			mutex_unlock(&u132->scheduler_lock);
			u132_ring_put_kref(u132, ring);
			return;
		} else {
			unsigned long delta = last_endp->jiffies - jiffies;
			if (delta > wakeup)
				wakeup = delta;
		}
		if (wakeup > 0) {
			/* only delayed work left - try again later */
			u132_ring_requeue_work(u132, ring, wakeup);
			mutex_unlock(&u132->scheduler_lock);
			return;
		} else {
			mutex_unlock(&u132->scheduler_lock);
			u132_ring_put_kref(u132, ring);
			return;
		}
	} else {
		/* no endpoint has been scheduled on this ring yet */
		mutex_unlock(&u132->scheduler_lock);
		u132_ring_put_kref(u132, ring);
		return;
	}
}
  1304. static void u132_hcd_endp_work_scheduler(struct work_struct *work)
  1305. {
  1306. struct u132_ring *ring;
  1307. struct u132_endp *endp =
  1308. container_of(work, struct u132_endp, scheduler.work);
  1309. struct u132 *u132 = endp->u132;
  1310. mutex_lock(&u132->scheduler_lock);
  1311. ring = endp->ring;
  1312. if (endp->edset_flush) {
  1313. endp->edset_flush = 0;
  1314. if (endp->dequeueing)
  1315. usb_ftdi_elan_edset_flush(u132->platform_dev,
  1316. ring->number, endp);
  1317. mutex_unlock(&u132->scheduler_lock);
  1318. u132_endp_put_kref(u132, endp);
  1319. return;
  1320. } else if (endp->active) {
  1321. mutex_unlock(&u132->scheduler_lock);
  1322. u132_endp_put_kref(u132, endp);
  1323. return;
  1324. } else if (ring->in_use) {
  1325. mutex_unlock(&u132->scheduler_lock);
  1326. u132_endp_put_kref(u132, endp);
  1327. return;
  1328. } else if (endp->queue_next == endp->queue_last) {
  1329. mutex_unlock(&u132->scheduler_lock);
  1330. u132_endp_put_kref(u132, endp);
  1331. return;
  1332. } else if (endp->pipetype == PIPE_INTERRUPT) {
  1333. u8 address = u132->addr[endp->usb_addr].address;
  1334. if (ring->in_use) {
  1335. mutex_unlock(&u132->scheduler_lock);
  1336. u132_endp_put_kref(u132, endp);
  1337. return;
  1338. } else {
  1339. int retval;
  1340. struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
  1341. endp->queue_next];
  1342. endp->active = 1;
  1343. ring->curr_endp = endp;
  1344. ring->in_use = 1;
  1345. mutex_unlock(&u132->scheduler_lock);
  1346. retval = edset_single(u132, ring, endp, urb, address,
  1347. endp->toggle_bits, u132_hcd_interrupt_recv);
  1348. if (retval != 0)
  1349. u132_hcd_giveback_urb(u132, endp, urb, retval);
  1350. return;
  1351. }
  1352. } else if (endp->pipetype == PIPE_CONTROL) {
  1353. u8 address = u132->addr[endp->usb_addr].address;
  1354. if (ring->in_use) {
  1355. mutex_unlock(&u132->scheduler_lock);
  1356. u132_endp_put_kref(u132, endp);
  1357. return;
  1358. } else if (address == 0) {
  1359. int retval;
  1360. struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
  1361. endp->queue_next];
  1362. endp->active = 1;
  1363. ring->curr_endp = endp;
  1364. ring->in_use = 1;
  1365. mutex_unlock(&u132->scheduler_lock);
  1366. retval = edset_setup(u132, ring, endp, urb, address,
  1367. 0x2, u132_hcd_initial_setup_sent);
  1368. if (retval != 0)
  1369. u132_hcd_giveback_urb(u132, endp, urb, retval);
  1370. return;
  1371. } else if (endp->usb_addr == 0) {
  1372. int retval;
  1373. struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
  1374. endp->queue_next];
  1375. endp->active = 1;
  1376. ring->curr_endp = endp;
  1377. ring->in_use = 1;
  1378. mutex_unlock(&u132->scheduler_lock);
  1379. retval = edset_setup(u132, ring, endp, urb, 0, 0x2,
  1380. u132_hcd_enumeration_address_sent);
  1381. if (retval != 0)
  1382. u132_hcd_giveback_urb(u132, endp, urb, retval);
  1383. return;
  1384. } else {
  1385. int retval;
  1386. struct urb *urb = endp->urb_list[ENDP_QUEUE_MASK &
  1387. endp->queue_next];
  1388. address = u132->addr[endp->usb_addr].address;
  1389. endp->active = 1;
  1390. ring->curr_endp = endp;
  1391. ring->in_use = 1;
  1392. mutex_unlock(&u132->scheduler_lock);
  1393. retval = edset_setup(u132, ring, endp, urb, address,
  1394. 0x2, u132_hcd_configure_setup_sent);
  1395. if (retval != 0)
  1396. u132_hcd_giveback_urb(u132, endp, urb, retval);
  1397. return;
  1398. }
  1399. } else {
  1400. if (endp->input) {
  1401. u8 address = u132->addr[endp->usb_addr].address;
  1402. if (ring->in_use) {
  1403. mutex_unlock(&u132->scheduler_lock);
  1404. u132_endp_put_kref(u132, endp);
  1405. return;
  1406. } else {
  1407. int retval;
  1408. struct urb *urb = endp->urb_list[
  1409. ENDP_QUEUE_MASK & endp->queue_next];
  1410. endp->active = 1;
  1411. ring->curr_endp = endp;
  1412. ring->in_use = 1;
  1413. mutex_unlock(&u132->scheduler_lock);
  1414. retval = edset_input(u132, ring, endp, urb,
  1415. address, endp->toggle_bits,
  1416. u132_hcd_bulk_input_recv);
  1417. if (retval == 0) {
  1418. } else
  1419. u132_hcd_giveback_urb(u132, endp, urb,
  1420. retval);
  1421. return;
  1422. }
  1423. } else { /* output pipe */
  1424. u8 address = u132->addr[endp->usb_addr].address;
  1425. if (ring->in_use) {
  1426. mutex_unlock(&u132->scheduler_lock);
  1427. u132_endp_put_kref(u132, endp);
  1428. return;
  1429. } else {
  1430. int retval;
  1431. struct urb *urb = endp->urb_list[
  1432. ENDP_QUEUE_MASK & endp->queue_next];
  1433. endp->active = 1;
  1434. ring->curr_endp = endp;
  1435. ring->in_use = 1;
  1436. mutex_unlock(&u132->scheduler_lock);
  1437. retval = edset_output(u132, ring, endp, urb,
  1438. address, endp->toggle_bits,
  1439. u132_hcd_bulk_output_sent);
  1440. if (retval == 0) {
  1441. } else
  1442. u132_hcd_giveback_urb(u132, endp, urb,
  1443. retval);
  1444. return;
  1445. }
  1446. }
  1447. }
  1448. }
  1449. #ifdef CONFIG_PM
/*
 * Record the software power state of root-hub port @pn.
 *
 * Bookkeeping only: no hardware register is written here.  Compiled
 * only under CONFIG_PM (see the surrounding #ifdef).
 */
static void port_power(struct u132 *u132, int pn, int is_on)
{
	u132->port[pn].power = is_on;
}
  1454. #endif
  1455. static void u132_power(struct u132 *u132, int is_on)
  1456. {
  1457. struct usb_hcd *hcd = u132_to_hcd(u132)
  1458. ; /* hub is inactive unless the port is powered */
  1459. if (is_on) {
  1460. if (u132->power)
  1461. return;
  1462. u132->power = 1;
  1463. } else {
  1464. u132->power = 0;
  1465. hcd->state = HC_STATE_HALT;
  1466. }
  1467. }
/*
 * Reprogram the OHCI frame-timing registers after a controller reset.
 *
 * Rewrites HcFmInterval from the cached u132->hc_fminterval while
 * flipping the FIT (FrameIntervalToggle) bit, then sets
 * HcPeriodicStart to 90% of the frame interval as the OHCI spec
 * recommends.
 *
 * Returns 0 on success, or the error from the first failing register
 * access over the ftdi-elan transport.
 *
 * NOTE: the second argument of u132_{read,write}_pcimem() is a
 * register *name* token, not the local variable of the same name.
 */
static int u132_periodic_reinit(struct u132 *u132)
{
	int retval;
	u32 fi = u132->hc_fminterval & 0x03fff;	/* FrameInterval field */
	u32 fit;
	u32 fminterval;
	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	fit = fminterval & FIT;
	/* toggle FIT so the controller latches the new frame interval */
	retval = u132_write_pcimem(u132, fminterval,
		(fit ^ FIT) | u132->hc_fminterval);
	if (retval)
		return retval;
	/* PeriodicStart = 90% of FrameInterval */
	retval = u132_write_pcimem(u132, periodicstart,
		((9 * fi) / 10) & 0x3fff);
	if (retval)
		return retval;
	return 0;
}
  1488. static char *hcfs2string(int state)
  1489. {
  1490. switch (state) {
  1491. case OHCI_USB_RESET:
  1492. return "reset";
  1493. case OHCI_USB_RESUME:
  1494. return "resume";
  1495. case OHCI_USB_OPER:
  1496. return "operational";
  1497. case OHCI_USB_SUSPEND:
  1498. return "suspend";
  1499. }
  1500. return "?";
  1501. }
/*
 * One-time controller initialisation: disable the controller, mask the
 * master interrupt enable, and discover the number of root-hub ports.
 *
 * Returns 0 on success, a negative error from a failed register
 * access, or -EINVAL if the hardware reports more ports than
 * MAX_U132_PORTS.
 */
static int u132_init(struct u132 *u132)
{
	int retval;
	u32 control;
	u132_disable(u132);
	u132->next_statechange = jiffies;
	/* mask MIE before touching anything else */
	retval = u132_write_pcimem(u132, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	if (u132->num_ports == 0) {
		/* first call: read NumberDownstreamPorts from HcRhDescriptorA */
		u32 rh_a = -1;
		retval = u132_read_pcimem(u132, roothub.a, &rh_a);
		if (retval)
			return retval;
		u132->num_ports = rh_a & RH_A_NDP;
		retval = read_roothub_info(u132);
		if (retval)
			return retval;
	}
	if (u132->num_ports > MAX_U132_PORTS)
		return -EINVAL;
	return 0;
}
/* Start an OHCI controller: set the bus operational,
 * reset the USB bus and the controller,
 * and configure interrupt delivery.
 */
/*
 * Start the OHCI controller and bring the bus operational (the moral
 * equivalent of ohci_run(), except every register access goes through
 * the ftdi-elan transport and may fail).
 *
 * Sequence: capture the power-on frame interval on first run, resume
 * or reset the bus depending on the saved functional state, issue a
 * host-controller reset (HCR) and poll until it clears, reprogram the
 * schedule heads and frame timing (retrying once with the INITRESET
 * quirk if the timing registers did not take), then set the bus
 * operational, enable bulk-list processing and power up the root hub.
 *
 * Returns 0 on success, -ENODEV if the controller reset times out, or
 * the error from the first failing register access.
 */
static int u132_run(struct u132 *u132)
{
	int retval;
	u32 control;
	u32 status;
	u32 fminterval;
	u32 periodicstart;
	u32 cmdstatus;
	u32 roothub_a;
	int mask = OHCI_INTR_INIT;
	int first = u132->hc_fminterval == 0;
	int sleep_time = 0;
	int reset_timeout = 30;	/* ... allow extra time */
	u132_disable(u132);
	if (first) {
		/* remember the power-on FrameInterval and derive the
		 * FSLargestDataPacket field for later reprogramming */
		u32 temp;
		retval = u132_read_pcimem(u132, fminterval, &temp);
		if (retval)
			return retval;
		u132->hc_fminterval = temp & 0x3fff;
		u132->hc_fminterval |= FSMP(u132->hc_fminterval) << 16;
	}
	retval = u132_read_pcimem(u132, control, &u132->hc_control);
	if (retval)
		return retval;
	dev_info(&u132->platform_dev->dev, "resetting from state '%s', control "
		"= %08X\n", hcfs2string(u132->hc_control & OHCI_CTRL_HCFS),
		u132->hc_control);
	switch (u132->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_OPER:
		sleep_time = 0;
		break;
	case OHCI_USB_SUSPEND:
	case OHCI_USB_RESUME:
		/* keep only the RemoteWakeupConnected bit, then resume */
		u132->hc_control &= OHCI_CTRL_RWC;
		u132->hc_control |= OHCI_USB_RESUME;
		sleep_time = 10;
		break;
	default:
		u132->hc_control &= OHCI_CTRL_RWC;
		u132->hc_control |= OHCI_USB_RESET;
		sleep_time = 50;
		break;
	}
	retval = u132_write_pcimem(u132, control, u132->hc_control);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	msleep(sleep_time);	/* allow resume/reset signalling to complete */
	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
	if (retval)
		return retval;
	if (!(roothub_a & RH_A_NPS)) {
		int temp;	/* power down each port */
		for (temp = 0; temp < u132->num_ports; temp++) {
			retval = u132_write_pcimem(u132,
				roothub.portstatus[temp], RH_PS_LSDA);
			if (retval)
				return retval;
		}
	}
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
retry:
	retval = u132_read_pcimem(u132, cmdstatus, &status);
	if (retval)
		return retval;
	/* request a host controller reset ... */
	retval = u132_write_pcimem(u132, cmdstatus, OHCI_HCR);
	if (retval)
		return retval;
extra:	{
		/* ... and poll until HCR self-clears or we time out */
		retval = u132_read_pcimem(u132, cmdstatus, &status);
		if (retval)
			return retval;
		if (0 != (status & OHCI_HCR)) {
			if (--reset_timeout == 0) {
				dev_err(&u132->platform_dev->dev, "USB HC reset"
					" timed out!\n");
				return -ENODEV;
			} else {
				msleep(5);
				goto extra;
			}
		}
	}
	if (u132->flags & OHCI_QUIRK_INITRESET) {
		retval = u132_write_pcimem(u132, control, u132->hc_control);
		if (retval)
			return retval;
		retval = u132_read_pcimem(u132, control, &control);
		if (retval)
			return retval;
	}
	retval = u132_write_pcimem(u132, ed_controlhead, 0x00000000);
	if (retval)
		return retval;
	/* NOTE(review): ohci_run() zeroes ed_bulkhead; the nonzero
	 * 0x11000000 here looks suspicious -- confirm against hardware */
	retval = u132_write_pcimem(u132, ed_bulkhead, 0x11000000);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, hcca, 0x00000000);
	if (retval)
		return retval;
	retval = u132_periodic_reinit(u132);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, periodicstart, &periodicstart);
	if (retval)
		return retval;
	if (0 == (fminterval & 0x3fff0000) || 0 == periodicstart) {
		/* frame timing did not stick: retry once with INITRESET */
		if (!(u132->flags & OHCI_QUIRK_INITRESET)) {
			u132->flags |= OHCI_QUIRK_INITRESET;
			goto retry;
		} else
			dev_err(&u132->platform_dev->dev, "init err(%08x %04x)"
				"\n", fminterval, periodicstart);
	} /* start controller operations */
	u132->hc_control &= OHCI_CTRL_RWC;
	u132->hc_control |= OHCI_CONTROL_INIT | OHCI_CTRL_BLE | OHCI_USB_OPER;
	retval = u132_write_pcimem(u132, control, u132->hc_control);
	if (retval)
		return retval;
	/* kick the bulk list */
	retval = u132_write_pcimem(u132, cmdstatus, OHCI_BLF);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, cmdstatus, &cmdstatus);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
	retval = u132_write_pcimem(u132, roothub.status, RH_HS_DRWE);
	if (retval)
		return retval;
	/* clear any pending interrupt status bits */
	retval = u132_write_pcimem(u132, intrstatus, mask);
	if (retval)
		return retval;
	/* NOTE(review): every interrupt source is masked here; events are
	 * presumably delivered via the ftdi-elan companion instead of the
	 * OHCI interrupt line -- confirm */
	retval = u132_write_pcimem(u132, intrdisable,
		OHCI_INTR_MIE | OHCI_INTR_OC | OHCI_INTR_RHSC | OHCI_INTR_FNO |
		OHCI_INTR_UE | OHCI_INTR_RD | OHCI_INTR_SF | OHCI_INTR_WDH |
		OHCI_INTR_SO);
	if (retval)
		return retval;	/* handle root hub init quirks ... */
	retval = u132_read_pcimem(u132, roothub.a, &roothub_a);
	if (retval)
		return retval;
	roothub_a &= ~(RH_A_PSM | RH_A_OCPM);
	if (u132->flags & OHCI_QUIRK_SUPERIO) {
		/* NSC 87560 and maybe others: no overcurrent reporting,
		 * ports are always powered */
		roothub_a |= RH_A_NOCP;
		roothub_a &= ~(RH_A_POTPGT | RH_A_NPS);
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	} else if ((u132->flags & OHCI_QUIRK_AMD756) || distrust_firmware) {
		/* hub power always on: required for AMD-756 and some
		 * Mac platforms */
		roothub_a |= RH_A_NPS;
		retval = u132_write_pcimem(u132, roothub.a, roothub_a);
		if (retval)
			return retval;
	}
	retval = u132_write_pcimem(u132, roothub.status, RH_HS_LPSC);
	if (retval)
		return retval;
	retval = u132_write_pcimem(u132, roothub.b,
		(roothub_a & RH_A_NPS) ? 0 : RH_B_PPCM);
	if (retval)
		return retval;
	retval = u132_read_pcimem(u132, control, &control);
	if (retval)
		return retval;
	/* wait POTPGT (units of 2 ms) for port power to stabilise */
	mdelay((roothub_a >> 23) & 0x1fe);
	u132_to_hcd(u132)->state = HC_STATE_RUNNING;
	return 0;
}
  1711. static void u132_hcd_stop(struct usb_hcd *hcd)
  1712. {
  1713. struct u132 *u132 = hcd_to_u132(hcd);
  1714. if (u132->going > 1) {
  1715. dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p) has b"
  1716. "een removed %d\n", u132, hcd, u132->going);
  1717. } else if (u132->going > 0) {
  1718. dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
  1719. "ed\n", hcd);
  1720. } else {
  1721. mutex_lock(&u132->sw_lock);
  1722. msleep(100);
  1723. u132_power(u132, 0);
  1724. mutex_unlock(&u132->sw_lock);
  1725. }
  1726. }
/*
 * hc_driver start() callback: apply PCI-id-based OHCI quirks from the
 * platform data, then start the controller via u132_run().
 *
 * Returns -ENODEV if the device was removed or the platform device is
 * missing, -ESHUTDOWN if removal is in progress, otherwise the result
 * of u132_run() (on failure the device is disabled and marked going).
 */
static int u132_hcd_start(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else if (hcd->self.controller) {
		int retval;
		struct platform_device *pdev =
			to_platform_device(hcd->self.controller);
		/* vendor/device ids of the host bridge, provided by the
		 * ftdi-elan platform data */
		u16 vendor = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->vendor;
		u16 device = ((struct u132_platform_data *)
			(pdev->dev.platform_data))->device;
		mutex_lock(&u132->sw_lock);
		msleep(10);
		if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
			/* NOTE(review): plain '=' overwrites any earlier
			 * flags, unlike the '|=' used for ZFMICRO below --
			 * confirm this is intentional */
			u132->flags = OHCI_QUIRK_AMD756;
		} else if (vendor == PCI_VENDOR_ID_OPTI && device == 0xc861) {
			dev_err(&u132->platform_dev->dev, "WARNING: OPTi workar"
				"ounds unavailable\n");
		} else if (vendor == PCI_VENDOR_ID_COMPAQ && device == 0xa0f8)
			u132->flags |= OHCI_QUIRK_ZFMICRO;
		retval = u132_run(u132);
		if (retval) {
			/* start failed: take the device out of service */
			u132_disable(u132);
			u132->going = 1;
		}
		msleep(100);
		mutex_unlock(&u132->sw_lock);
		return retval;
	} else {
		dev_err(&u132->platform_dev->dev, "platform_device missing\n");
		return -ENODEV;
	}
}
  1767. static int u132_hcd_reset(struct usb_hcd *hcd)
  1768. {
  1769. struct u132 *u132 = hcd_to_u132(hcd);
  1770. if (u132->going > 1) {
  1771. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  1772. , u132->going);
  1773. return -ENODEV;
  1774. } else if (u132->going > 0) {
  1775. dev_err(&u132->platform_dev->dev, "device is being removed\n");
  1776. return -ESHUTDOWN;
  1777. } else {
  1778. int retval;
  1779. mutex_lock(&u132->sw_lock);
  1780. retval = u132_init(u132);
  1781. if (retval) {
  1782. u132_disable(u132);
  1783. u132->going = 1;
  1784. }
  1785. mutex_unlock(&u132->sw_lock);
  1786. return retval;
  1787. }
  1788. }
/*
 * Allocate a new interrupt endpoint for @urb, link it onto ring 0,
 * queue the urb as the endpoint's first entry, and schedule the first
 * transfer after one polling interval.
 *
 * Returns 0 on success, -ENOMEM if the endpoint cannot be allocated,
 * or the error from usb_hcd_link_urb_to_ep().
 */
static int create_endpoint_and_queue_int(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;
	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	/* interrupt endpoints always live on ring 0 */
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	endp->pipetype = usb_pipetype(urb->pipe);
	u132_endp_init_kref(u132, endp);
	if (usb_pipein(urb->pipe)) {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
		endp->input = 1;
		endp->output = 0;
		udev->endp_number_in[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	} else {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
		endp->input = 0;
		endp->output = 1;
		udev->endp_number_out[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	}
	urb->hcpriv = u132;
	/* first poll is delayed by the urb's interval */
	endp->delayed = 1;
	endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, msecs_to_jiffies(urb->interval));
	return 0;
}
  1859. static int queue_int_on_old_endpoint(struct u132 *u132,
  1860. struct u132_udev *udev, struct urb *urb,
  1861. struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
  1862. u8 usb_endp, u8 address)
  1863. {
  1864. urb->hcpriv = u132;
  1865. endp->delayed = 1;
  1866. endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
  1867. if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
  1868. endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
  1869. } else {
  1870. struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
  1871. GFP_ATOMIC);
  1872. if (urbq == NULL) {
  1873. endp->queue_size -= 1;
  1874. return -ENOMEM;
  1875. } else {
  1876. list_add_tail(&urbq->urb_more, &endp->urb_more);
  1877. urbq->urb = urb;
  1878. }
  1879. }
  1880. return 0;
  1881. }
/*
 * Allocate a new bulk endpoint for @urb, link it onto ring 3 (IN) or
 * ring 2 (OUT), queue the urb as the endpoint's first entry and kick
 * the scheduler immediately.
 *
 * Returns 0 on success, -ENOMEM if the endpoint cannot be allocated,
 * or the error from usb_hcd_link_urb_to_ep().
 */
static int create_endpoint_and_queue_bulk(struct u132 *u132,
	struct u132_udev *udev, struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
	gfp_t mem_flags)
{
	int ring_number;
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;
	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	endp->pipetype = usb_pipetype(urb->pipe);
	u132_endp_init_kref(u132, endp);
	if (usb_pipein(urb->pipe)) {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 0, 0);
		ring_number = 3;	/* bulk IN ring */
		endp->input = 1;
		endp->output = 0;
		udev->endp_number_in[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	} else {
		endp->toggle_bits = 0x2;
		usb_settoggle(udev->usb_device, usb_endp, 1, 0);
		ring_number = 2;	/* bulk OUT ring */
		endp->input = 0;
		endp->output = 1;
		udev->endp_number_out[usb_endp] = endp_number;
		u132_udev_get_kref(u132, udev);
	}
	ring = endp->ring = &u132->ring[ring_number - 1];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	urb->hcpriv = u132;
	endp->udev_number = address;
	endp->usb_addr = usb_addr;
	endp->usb_endp = usb_endp;
	endp->queue_size = 1;
	endp->queue_last = 0;
	endp->queue_next = 0;
	endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
	spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	u132_endp_queue_work(u132, endp, 0);
	return 0;
}
  1953. static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
  1954. struct urb *urb,
  1955. struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
  1956. u8 usb_endp, u8 address)
  1957. {
  1958. urb->hcpriv = u132;
  1959. if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
  1960. endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
  1961. } else {
  1962. struct u132_urbq *urbq = kmalloc(sizeof(struct u132_urbq),
  1963. GFP_ATOMIC);
  1964. if (urbq == NULL) {
  1965. endp->queue_size -= 1;
  1966. return -ENOMEM;
  1967. } else {
  1968. list_add_tail(&urbq->urb_more, &endp->urb_more);
  1969. urbq->urb = urb;
  1970. }
  1971. }
  1972. return 0;
  1973. }
/*
 * Allocate a new control endpoint for @urb on ring 0 and queue the urb
 * as its first entry.  Control endpoints are bidirectional, so both
 * the in and out slots of the device map point at this endpoint.
 *
 * usb_addr == 0 is the enumeration-time default address: the udev kref
 * is (re)initialised.  For usb_addr > 0 the already-mapped udev is
 * reused and marked as fully enumerated (enumeration = 2).
 *
 * Returns 0 on success, -ENOMEM if the endpoint cannot be allocated,
 * or the error from usb_hcd_link_urb_to_ep().
 */
static int create_endpoint_and_queue_control(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
	if (!endp)
		return -ENOMEM;
	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}
	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	/* control endpoints share ring 0 with interrupt endpoints */
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	u132_endp_init_kref(u132, endp);
	/* control endpoints hold an extra endpoint reference */
	u132_endp_get_kref(u132, endp);
	if (usb_addr == 0) {
		/* default (enumeration) address */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_init_kref(u132, udev);
		u132_udev_get_kref(u132, udev);
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		urb->hcpriv = u132;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	} else {	/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_get_kref(u132, udev);
		udev->enumeration = 2;
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		urb->hcpriv = u132;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	}
}
/*
 * Append a control URB to an already-existing endpoint's queue.
 * Caller holds endp->queue_lock.slock.
 *
 * For OUT transfers to the default address (usb_addr == 0) this also
 * performs the driver's address allocation: a free udev slot is
 * claimed, mapped to the device, and the chosen number is patched into
 * byte 2 of the setup packet (the wValue of the SET_ADDRESS request).
 *
 * Returns 0 on success, -ENOMEM if an overflow node cannot be
 * allocated, or -EINVAL when no free udev slot remains.
 */
static int queue_control_on_old_endpoint(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp)
{
	if (usb_addr == 0) {
		if (usb_pipein(urb->pipe)) {
			urb->hcpriv = u132;
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		} else {	/* usb_pipeout(urb->pipe) */
			struct u132_addr *addr = &u132->addr[usb_dev->devnum];
			int I = MAX_U132_UDEVS;
			int i = 0;
			/* scan udev[1..MAX_U132_UDEVS-1] for a free slot
			 * (slot 0 is never used) */
			while (--I > 0) {
				struct u132_udev *udev = &u132->udev[++i];
				if (udev->usb_device) {
					continue;
				} else {
					udev->enumeration = 1;
					u132->addr[0].address = i;
					endp->udev_number = i;
					udev->udev_number = i;
					udev->usb_addr = usb_dev->devnum;
					u132_udev_init_kref(u132, udev);
					udev->endp_number_in[usb_endp] =
						endp->endp_number;
					u132_udev_get_kref(u132, udev);
					udev->endp_number_out[usb_endp] =
						endp->endp_number;
					udev->usb_device = usb_dev;
					/* patch the allocated address into
					 * the SET_ADDRESS setup packet */
					((u8 *) (urb->setup_packet))[2] =
						addr->address = i;
					u132_udev_get_kref(u132, udev);
					break;
				}
			}
			/* I reaches 0 only when the scan found no free slot
			 * (break leaves I > 0) */
			if (I == 0) {
				dev_err(&u132->platform_dev->dev, "run out of d"
					"evice space\n");
				return -EINVAL;
			}
			urb->hcpriv = u132;
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		}
	} else {	/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		urb->hcpriv = u132;
		/* device responded on its real address: enumeration done */
		if (udev->enumeration != 2)
			udev->enumeration = 2;
		if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
			endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
				urb;
		} else {
			struct u132_urbq *urbq =
				kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
			if (urbq == NULL) {
				endp->queue_size -= 1;
				return -ENOMEM;
			} else {
				list_add_tail(&urbq->urb_more, &endp->urb_more);
				urbq->urb = urb;
			}
		}
		return 0;
	}
}
  2158. static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
  2159. gfp_t mem_flags)
  2160. {
  2161. struct u132 *u132 = hcd_to_u132(hcd);
  2162. if (irqs_disabled()) {
  2163. if (__GFP_WAIT & mem_flags) {
  2164. printk(KERN_ERR "invalid context for function that migh"
  2165. "t sleep\n");
  2166. return -EINVAL;
  2167. }
  2168. }
  2169. if (u132->going > 1) {
  2170. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  2171. , u132->going);
  2172. return -ENODEV;
  2173. } else if (u132->going > 0) {
  2174. dev_err(&u132->platform_dev->dev, "device is being removed "
  2175. "urb=%p\n", urb);
  2176. return -ESHUTDOWN;
  2177. } else {
  2178. u8 usb_addr = usb_pipedevice(urb->pipe);
  2179. u8 usb_endp = usb_pipeendpoint(urb->pipe);
  2180. struct usb_device *usb_dev = urb->dev;
  2181. if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
  2182. u8 address = u132->addr[usb_addr].address;
  2183. struct u132_udev *udev = &u132->udev[address];
  2184. struct u132_endp *endp = urb->ep->hcpriv;
  2185. urb->actual_length = 0;
  2186. if (endp) {
  2187. unsigned long irqs;
  2188. int retval;
  2189. spin_lock_irqsave(&endp->queue_lock.slock,
  2190. irqs);
  2191. retval = usb_hcd_link_urb_to_ep(hcd, urb);
  2192. if (retval == 0) {
  2193. retval = queue_int_on_old_endpoint(
  2194. u132, udev, urb,
  2195. usb_dev, endp,
  2196. usb_addr, usb_endp,
  2197. address);
  2198. if (retval)
  2199. usb_hcd_unlink_urb_from_ep(
  2200. hcd, urb);
  2201. }
  2202. spin_unlock_irqrestore(&endp->queue_lock.slock,
  2203. irqs);
  2204. if (retval) {
  2205. return retval;
  2206. } else {
  2207. u132_endp_queue_work(u132, endp,
  2208. msecs_to_jiffies(urb->interval))
  2209. ;
  2210. return 0;
  2211. }
  2212. } else if (u132->num_endpoints == MAX_U132_ENDPS) {
  2213. return -EINVAL;
  2214. } else { /*(endp == NULL) */
  2215. return create_endpoint_and_queue_int(u132, udev,
  2216. urb, usb_dev, usb_addr,
  2217. usb_endp, address, mem_flags);
  2218. }
  2219. } else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  2220. dev_err(&u132->platform_dev->dev, "the hardware does no"
  2221. "t support PIPE_ISOCHRONOUS\n");
  2222. return -EINVAL;
  2223. } else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
  2224. u8 address = u132->addr[usb_addr].address;
  2225. struct u132_udev *udev = &u132->udev[address];
  2226. struct u132_endp *endp = urb->ep->hcpriv;
  2227. urb->actual_length = 0;
  2228. if (endp) {
  2229. unsigned long irqs;
  2230. int retval;
  2231. spin_lock_irqsave(&endp->queue_lock.slock,
  2232. irqs);
  2233. retval = usb_hcd_link_urb_to_ep(hcd, urb);
  2234. if (retval == 0) {
  2235. retval = queue_bulk_on_old_endpoint(
  2236. u132, udev, urb,
  2237. usb_dev, endp,
  2238. usb_addr, usb_endp,
  2239. address);
  2240. if (retval)
  2241. usb_hcd_unlink_urb_from_ep(
  2242. hcd, urb);
  2243. }
  2244. spin_unlock_irqrestore(&endp->queue_lock.slock,
  2245. irqs);
  2246. if (retval) {
  2247. return retval;
  2248. } else {
  2249. u132_endp_queue_work(u132, endp, 0);
  2250. return 0;
  2251. }
  2252. } else if (u132->num_endpoints == MAX_U132_ENDPS) {
  2253. return -EINVAL;
  2254. } else
  2255. return create_endpoint_and_queue_bulk(u132,
  2256. udev, urb, usb_dev, usb_addr,
  2257. usb_endp, address, mem_flags);
  2258. } else {
  2259. struct u132_endp *endp = urb->ep->hcpriv;
  2260. u16 urb_size = 8;
  2261. u8 *b = urb->setup_packet;
  2262. int i = 0;
  2263. char data[30 * 3 + 4];
  2264. char *d = data;
  2265. int m = (sizeof(data) - 1) / 3;
  2266. int l = 0;
  2267. data[0] = 0;
  2268. while (urb_size-- > 0) {
  2269. if (i > m) {
  2270. } else if (i++ < m) {
  2271. int w = sprintf(d, " %02X", *b++);
  2272. d += w;
  2273. l += w;
  2274. } else
  2275. d += sprintf(d, " ..");
  2276. }
  2277. if (endp) {
  2278. unsigned long irqs;
  2279. int retval;
  2280. spin_lock_irqsave(&endp->queue_lock.slock,
  2281. irqs);
  2282. retval = usb_hcd_link_urb_to_ep(hcd, urb);
  2283. if (retval == 0) {
  2284. retval = queue_control_on_old_endpoint(
  2285. u132, urb, usb_dev,
  2286. endp, usb_addr,
  2287. usb_endp);
  2288. if (retval)
  2289. usb_hcd_unlink_urb_from_ep(
  2290. hcd, urb);
  2291. }
  2292. spin_unlock_irqrestore(&endp->queue_lock.slock,
  2293. irqs);
  2294. if (retval) {
  2295. return retval;
  2296. } else {
  2297. u132_endp_queue_work(u132, endp, 0);
  2298. return 0;
  2299. }
  2300. } else if (u132->num_endpoints == MAX_U132_ENDPS) {
  2301. return -EINVAL;
  2302. } else
  2303. return create_endpoint_and_queue_control(u132,
  2304. urb, usb_dev, usb_addr, usb_endp,
  2305. mem_flags);
  2306. }
  2307. }
  2308. }
  2309. static int dequeue_from_overflow_chain(struct u132 *u132,
  2310. struct u132_endp *endp, struct urb *urb)
  2311. {
  2312. struct list_head *scan;
  2313. struct list_head *head = &endp->urb_more;
  2314. list_for_each(scan, head) {
  2315. struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
  2316. urb_more);
  2317. if (urbq->urb == urb) {
  2318. struct usb_hcd *hcd = u132_to_hcd(u132);
  2319. list_del(scan);
  2320. endp->queue_size -= 1;
  2321. urb->error_count = 0;
  2322. usb_hcd_giveback_urb(hcd, urb, 0);
  2323. return 0;
  2324. } else
  2325. continue;
  2326. }
  2327. dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
  2328. "[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
  2329. "\n", urb, endp->endp_number, endp, endp->ring->number,
  2330. endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
  2331. endp->usb_endp, endp->usb_addr, endp->queue_size,
  2332. endp->queue_next, endp->queue_last);
  2333. return -EINVAL;
  2334. }
/*
 * Remove @urb from @endp's queue and give it back to usbcore with @status.
 *
 * The queue has two tiers: a fixed ring of ENDP_QUEUE_SIZE slots
 * (endp->urb_list, indexed modulo via ENDP_QUEUE_MASK between queue_next
 * and queue_last) and an overflow list (endp->urb_more).
 *
 * Returns 0 on success, -EINVAL if the urb is not queued here, or the
 * error from usb_hcd_check_unlink_urb().
 */
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	int rc;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	/* usbcore sanity check: urb must be linked and not already unlinked */
	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return rc;
	}
	if (endp->queue_size == 0) {
		dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
			"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
			endp->endp_number, endp, endp->ring->number,
			endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
			endp->usb_endp, endp->usb_addr);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return -EINVAL;
	}
	if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
		/* urb is at the head of the ring */
		if (endp->active) {
			/* transfer in flight: ask the work handler to flush
			 * the endpoint descriptor and complete the unlink
			 * asynchronously
			 */
			endp->dequeueing = 1;
			endp->edset_flush = 1;
			u132_endp_queue_work(u132, endp, 0);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return 0;
		} else {
			/* idle head urb can be abandoned immediately
			 * (abandon takes the lock itself, so drop it first)
			 */
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			u132_hcd_abandon_urb(u132, endp, urb, status);
			return 0;
		}
	} else {
		/* urb is (maybe) somewhere behind the head: scan the ring */
		u16 queue_list = 0;
		u16 queue_size = endp->queue_size;
		u16 queue_scan = endp->queue_next;
		struct urb **urb_slot = NULL;
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			if (urb == endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan]) {
				urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
					queue_scan];
				break;
			} else
				continue;
		}
		/* compact the ring: shift every later entry up one slot to
		 * fill the hole (loop counters continue from the scan above,
		 * so this runs only when the urb was found)
		 */
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan];
			urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
				queue_scan];
		}
		if (urb_slot) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			usb_hcd_unlink_urb_from_ep(hcd, urb);
			endp->queue_size -= 1;
			if (list_empty(&endp->urb_more)) {
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
			} else {
				/* promote the first overflow urb into the
				 * ring slot freed by the compaction
				 */
				struct list_head *next = endp->urb_more.next;
				struct u132_urbq *urbq = list_entry(next,
					struct u132_urbq, urb_more);
				list_del(next);
				*urb_slot = urbq->urb;
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				kfree(urbq);
			} urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, status);
			return 0;
		} else if (list_empty(&endp->urb_more)) {
			dev_err(&u132->platform_dev->dev, "urb=%p not found in "
				"endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
				"=%d size=%d next=%04X last=%04X\n", urb,
				endp->endp_number, endp, endp->ring->number,
				endp->input ? 'I' : ' ',
				endp->output ? 'O' : ' ', endp->usb_endp,
				endp->usb_addr, endp->queue_size,
				endp->queue_next, endp->queue_last);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return -EINVAL;
		} else {
			/* not in the ring but overflow entries exist: try the
			 * overflow chain
			 */
			int retval;
			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
			retval = dequeue_from_overflow_chain(u132, endp,
				urb);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return retval;
		}
	}
}
  2427. static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
  2428. {
  2429. struct u132 *u132 = hcd_to_u132(hcd);
  2430. if (u132->going > 2) {
  2431. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  2432. , u132->going);
  2433. return -ENODEV;
  2434. } else {
  2435. u8 usb_addr = usb_pipedevice(urb->pipe);
  2436. u8 usb_endp = usb_pipeendpoint(urb->pipe);
  2437. u8 address = u132->addr[usb_addr].address;
  2438. struct u132_udev *udev = &u132->udev[address];
  2439. if (usb_pipein(urb->pipe)) {
  2440. u8 endp_number = udev->endp_number_in[usb_endp];
  2441. struct u132_endp *endp = u132->endp[endp_number - 1];
  2442. return u132_endp_urb_dequeue(u132, endp, urb, status);
  2443. } else {
  2444. u8 endp_number = udev->endp_number_out[usb_endp];
  2445. struct u132_endp *endp = u132->endp[endp_number - 1];
  2446. return u132_endp_urb_dequeue(u132, endp, urb, status);
  2447. }
  2448. }
  2449. }
  2450. static void u132_endpoint_disable(struct usb_hcd *hcd,
  2451. struct usb_host_endpoint *hep)
  2452. {
  2453. struct u132 *u132 = hcd_to_u132(hcd);
  2454. if (u132->going > 2) {
  2455. dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p"
  2456. ") has been removed %d\n", u132, hcd, hep,
  2457. u132->going);
  2458. } else {
  2459. struct u132_endp *endp = hep->hcpriv;
  2460. if (endp)
  2461. u132_endp_put_kref(u132, endp);
  2462. }
  2463. }
  2464. static int u132_get_frame(struct usb_hcd *hcd)
  2465. {
  2466. struct u132 *u132 = hcd_to_u132(hcd);
  2467. if (u132->going > 1) {
  2468. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  2469. , u132->going);
  2470. return -ENODEV;
  2471. } else if (u132->going > 0) {
  2472. dev_err(&u132->platform_dev->dev, "device is being removed\n");
  2473. return -ESHUTDOWN;
  2474. } else {
  2475. int frame = 0;
  2476. dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
  2477. msleep(100);
  2478. return frame;
  2479. }
  2480. }
/*
 * Build a USB hub descriptor for the virtual root hub from the OHCI
 * roothub.a / roothub.b registers.
 *
 * Returns 0 on success or the error from u132_read_pcimem().
 */
static int u132_roothub_descriptor(struct u132 *u132,
	struct usb_hub_descriptor *desc)
{
	int retval;
	u16 temp;
	u32 rh_a = -1;
	u32 rh_b = -1;
	retval = u132_read_pcimem(u132, roothub.a, &rh_a);
	if (retval)
		return retval;
	desc->bDescriptorType = 0x29;	/* USB hub descriptor type */
	/* POTPGT lives in the top byte of roothub.a (units of 2 ms) */
	desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = u132->num_ports;
	/* one bitmap byte per 8 ports, for both DeviceRemovable and
	 * PortPwrCtrlMask */
	temp = 1 + (u132->num_ports / 8);
	desc->bDescLength = 7 + 2 * temp;
	/* translate OHCI power/overcurrent mode bits into the hub
	 * wHubCharacteristics encoding */
	temp = 0;
	if (rh_a & RH_A_NPS)	/* no power switching */
		temp |= 0x0002;
	if (rh_a & RH_A_PSM)	/* per-port power switching */
		temp |= 0x0001;
	if (rh_a & RH_A_NOCP)	/* no overcurrent reporting */
		temp |= 0x0010;
	else if (rh_a & RH_A_OCPM)	/* per-port overcurrent */
		temp |= 0x0008;
	desc->wHubCharacteristics = cpu_to_le16(temp);
	retval = u132_read_pcimem(u132, roothub.b, &rh_b);
	if (retval)
		return retval;
	/* DeviceRemovable bitmap from roothub.b; trailing 0xff bytes act as
	 * the PortPwrCtrlMask required by older hub descriptor layouts */
	memset(desc->u.hs.DeviceRemovable, 0xff,
		sizeof(desc->u.hs.DeviceRemovable));
	desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
	if (u132->num_ports > 7) {
		desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
		desc->u.hs.DeviceRemovable[2] = 0xff;
	} else
		desc->u.hs.DeviceRemovable[1] = 0xff;
	return 0;
}
  2520. static int u132_roothub_status(struct u132 *u132, __le32 *desc)
  2521. {
  2522. u32 rh_status = -1;
  2523. int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status);
  2524. *desc = cpu_to_le32(rh_status);
  2525. return ret_status;
  2526. }
  2527. static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
  2528. {
  2529. if (wIndex == 0 || wIndex > u132->num_ports) {
  2530. return -EINVAL;
  2531. } else {
  2532. int port = wIndex - 1;
  2533. u32 rh_portstatus = -1;
  2534. int ret_portstatus = u132_read_pcimem(u132,
  2535. roothub.portstatus[port], &rh_portstatus);
  2536. *desc = cpu_to_le32(rh_portstatus);
  2537. if (*(u16 *) (desc + 2)) {
  2538. dev_info(&u132->platform_dev->dev, "Port %d Status Chan"
  2539. "ge = %08X\n", port, *desc);
  2540. }
  2541. return ret_portstatus;
  2542. }
  2543. }
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
#define PORT_RESET_MSEC 10
/* wrap-aware logic morphed from <linux/jiffies.h> */
#define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
/*
 * Drive a root-hub port reset: keep writing RH_PS_PRS (SetPortReset) and
 * waiting out each hardware reset pulse until the pulse completes or the
 * PORT_RESET_MSEC budget (measured in OHCI frame numbers) expires.
 *
 * Returns 0 on success, -ENODEV if the reset pulse never completes, or a
 * pcimem access error.
 */
static int u132_roothub_portreset(struct u132 *u132, int port_index)
{
	int retval;
	u32 fmnumber;
	u16 now;
	u16 reset_done;
	/* first "fmnumber" is the register field name consumed by the
	 * u132_read_pcimem() macro, the second is the local variable */
	retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
	if (retval)
		return retval;
	now = fmnumber;
	reset_done = now + PORT_RESET_MSEC;	/* deadline in frame numbers */
	do {
		u32 portstat;
		/* poll until the current reset pulse (PRS) clears or the
		 * deadline passes
		 * NOTE(review): this polls with no delay between reads —
		 * effectively a busy-wait bounded by the frame counter
		 */
		do {
			retval = u132_read_pcimem(u132,
				roothub.portstatus[port_index], &portstat);
			if (retval)
				return retval;
			if (RH_PS_PRS & portstat)
				continue;
			else
				break;
		} while (tick_before(now, reset_done));
		if (RH_PS_PRS & portstat)
			return -ENODEV;
		if (RH_PS_CCS & portstat) {
			/* device still connected; acknowledge the reset
			 * status change (write-1-to-clear) before the next
			 * pulse */
			if (RH_PS_PRSC & portstat) {
				retval = u132_write_pcimem(u132,
					roothub.portstatus[port_index],
					RH_PS_PRSC);
				if (retval)
					return retval;
			}
		} else
			break;	/* start the next reset,
				sleep till it's probably done */
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
			RH_PS_PRS);
		if (retval)
			return retval;
		msleep(PORT_RESET_HW_MSEC);
		retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
		if (retval)
			return retval;
		now = fmnumber;
	} while (tick_before(now, reset_done));
	return 0;
}
/*
 * Handle SetPortFeature for the virtual root hub by writing the matching
 * OHCI set bit into roothub.portstatus.
 *
 * Returns 0 on success, -EINVAL for a bad port index, -EPIPE (stall) for
 * an unsupported feature, or a pcimem write error.
 */
static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int retval;
		int port_index = wIndex - 1;
		struct u132_port *port = &u132->port[port_index];
		/* NOTE(review): this clears the cached feature bit even
		 * though this is the Set path — identical to the line in
		 * clearportfeature; confirm the intent */
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			/* SetPortSuspend */
			retval = u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PSS);
			if (retval)
				return retval;
			return 0;
		case USB_PORT_FEAT_POWER:
			/* SetPortPower */
			retval = u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PPS);
			if (retval)
				return retval;
			return 0;
		case USB_PORT_FEAT_RESET:
			retval = u132_roothub_portreset(u132, port_index);
			if (retval)
				return retval;
			return 0;
		default:
			return -EPIPE;	/* stall unsupported features */
		}
	}
}
/*
 * Handle ClearPortFeature for the virtual root hub. Each feature maps to
 * a write-1 bit in roothub.portstatus: the OHCI "clear" operations are
 * encoded as writes of other status bits (e.g. writing CCS clears port
 * enable, writing LSDA clears port power), and the change bits are
 * write-1-to-clear.
 *
 * Returns 0 on success, -EINVAL for a bad port index, -EPIPE (stall) for
 * an unsupported feature, or a pcimem write error.
 */
static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port_index = wIndex - 1;
		u32 temp;
		int retval;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);	/* drop cached feature bit */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			temp = RH_PS_CCS;	/* ClearPortEnable */
			break;
		case USB_PORT_FEAT_C_ENABLE:
			temp = RH_PS_PESC;	/* clear enable-change */
			break;
		case USB_PORT_FEAT_SUSPEND:
			temp = RH_PS_POCI;	/* ClearSuspendStatus */
			if ((u132->hc_control & OHCI_CTRL_HCFS)
				!= OHCI_USB_OPER) {
				dev_err(&u132->platform_dev->dev, "TODO resume_"
					"root_hub\n");
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			temp = RH_PS_PSSC;	/* clear suspend-change */
			break;
		case USB_PORT_FEAT_POWER:
			temp = RH_PS_LSDA;	/* ClearPortPower */
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			temp = RH_PS_CSC;	/* clear connect-change */
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			temp = RH_PS_OCIC;	/* clear overcurrent-change */
			break;
		case USB_PORT_FEAT_C_RESET:
			temp = RH_PS_PRSC;	/* clear reset-change */
			break;
		default:
			return -EPIPE;	/* stall unsupported features */
		}
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
			temp);
		if (retval)
			return retval;
		return 0;
	}
}
  2681. /* the virtual root hub timer IRQ checks for hub status*/
/*
 * hc_driver->hub_status_data entry point: build the hub "status change"
 * bitmap in @buf from the cached root-hub registers (kept fresh by the
 * monitor work item).
 *
 * Bit 0 of buf[0] reports hub-level changes; bit (port+1) reports a change
 * on that port, spilling into buf[1] above 7 ports.
 *
 * Returns the bitmap length in bytes when anything changed, 0 when
 * nothing changed, or a negative error when the device is going away.
 */
static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov"
			"ed %d\n", hcd, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
			"ed\n", hcd);
		return -ESHUTDOWN;
	} else {
		int i, changed = 0, length = 1;
		if (u132->flags & OHCI_QUIRK_AMD756) {
			/* AMD756 quirk: an impossible port count means the
			 * register read was bogus; report nothing */
			if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
				dev_err(&u132->platform_dev->dev, "bogus NDP, r"
					"ereads as NDP=%d\n",
					u132->hc_roothub_a & RH_A_NDP);
				goto done;
			}
		}
		/* hub-level change: local power or overcurrent */
		if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
			buf[0] = changed = 1;
		else
			buf[0] = 0;
		if (u132->num_ports > 7) {
			buf[1] = 0;
			length++;
		}
		for (i = 0; i < u132->num_ports; i++) {
			/* any per-port change bit set? */
			if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
				RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
				RH_PS_PRSC)) {
				changed = 1;
				if (i < 7)
					buf[0] |= 1 << (i + 1);
				else
					buf[1] |= 1 << (i - 7);
				continue;
			}
			/* remaining checks are no-ops today (placeholders
			 * mirroring the ohci-hcd structure) */
			if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
				continue;
			if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
				continue;
		}
done:
		return changed ? length : 0;
	}
}
/*
 * hc_driver->hub_control entry point: dispatch hub-class control requests
 * for the virtual root hub to the u132_roothub_* helpers, serialized by
 * sw_lock.
 *
 * The in-switch "error" label disables the controller on a register access
 * failure; "stall" answers unsupported requests with -EPIPE, which usbcore
 * turns into a control stall.
 */
static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
	u16 wIndex, char *buf, u16 wLength)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0;
		mutex_lock(&u132->sw_lock);
		switch (typeReq) {
		case ClearHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;	/* accepted, nothing to do */
			default:
				goto stall;
			}
			break;
		case SetHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;	/* accepted, nothing to do */
			default:
				goto stall;
			}
			break;
		case ClearPortFeature:{
				retval = u132_roothub_clearportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		case GetHubDescriptor:{
				retval = u132_roothub_descriptor(u132,
					(struct usb_hub_descriptor *)buf);
				if (retval)
					goto error;
				break;
			}
		case GetHubStatus:{
				retval = u132_roothub_status(u132,
					(__le32 *) buf);
				if (retval)
					goto error;
				break;
			}
		case GetPortStatus:{
				retval = u132_roothub_portstatus(u132,
					(__le32 *) buf, wIndex);
				if (retval)
					goto error;
				break;
			}
		case SetPortFeature:{
				retval = u132_roothub_setportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		default:
			goto stall;
		error:
			/* register access failed: take the controller down */
			u132_disable(u132);
			u132->going = 1;
			break;
		stall:
			retval = -EPIPE;
			break;
		}
		mutex_unlock(&u132->sw_lock);
		return retval;
	}
}
  2813. static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
  2814. {
  2815. struct u132 *u132 = hcd_to_u132(hcd);
  2816. if (u132->going > 1) {
  2817. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  2818. , u132->going);
  2819. return -ENODEV;
  2820. } else if (u132->going > 0) {
  2821. dev_err(&u132->platform_dev->dev, "device is being removed\n");
  2822. return -ESHUTDOWN;
  2823. } else
  2824. return 0;
  2825. }
  2826. #ifdef CONFIG_PM
  2827. static int u132_bus_suspend(struct usb_hcd *hcd)
  2828. {
  2829. struct u132 *u132 = hcd_to_u132(hcd);
  2830. if (u132->going > 1) {
  2831. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  2832. , u132->going);
  2833. return -ENODEV;
  2834. } else if (u132->going > 0) {
  2835. dev_err(&u132->platform_dev->dev, "device is being removed\n");
  2836. return -ESHUTDOWN;
  2837. } else
  2838. return 0;
  2839. }
  2840. static int u132_bus_resume(struct usb_hcd *hcd)
  2841. {
  2842. struct u132 *u132 = hcd_to_u132(hcd);
  2843. if (u132->going > 1) {
  2844. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  2845. , u132->going);
  2846. return -ENODEV;
  2847. } else if (u132->going > 0) {
  2848. dev_err(&u132->platform_dev->dev, "device is being removed\n");
  2849. return -ESHUTDOWN;
  2850. } else
  2851. return 0;
  2852. }
  2853. #else
  2854. #define u132_bus_suspend NULL
  2855. #define u132_bus_resume NULL
  2856. #endif
/* host controller driver operations for the virtual root hub; the
 * hardware is reached over USB via the ftdi-elan bridge, so there is no
 * interrupt line (.irq = NULL) and status is polled by the monitor work
 */
static struct hc_driver u132_hc_driver = {
	.description = hcd_name,
	.hcd_priv_size = sizeof(struct u132),
	.irq = NULL,
	.flags = HCD_USB11 | HCD_MEMORY,
	.reset = u132_hcd_reset,
	.start = u132_hcd_start,
	.stop = u132_hcd_stop,
	.urb_enqueue = u132_urb_enqueue,
	.urb_dequeue = u132_urb_dequeue,
	.endpoint_disable = u132_endpoint_disable,
	.get_frame_number = u132_get_frame,
	.hub_status_data = u132_hub_status_data,
	.hub_control = u132_hub_control,
	.bus_suspend = u132_bus_suspend,
	.bus_resume = u132_bus_resume,
	.start_port_reset = u132_start_port_reset,
};
  2875. /*
  2876. * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
  2877. * is held for writing, thus this module must not call usb_remove_hcd()
  2878. * synchronously - but instead should immediately stop activity to the
  2879. * device and asynchronously call usb_remove_hcd()
  2880. */
  2881. static int __devexit u132_remove(struct platform_device *pdev)
  2882. {
  2883. struct usb_hcd *hcd = platform_get_drvdata(pdev);
  2884. if (hcd) {
  2885. struct u132 *u132 = hcd_to_u132(hcd);
  2886. if (u132->going++ > 1) {
  2887. dev_err(&u132->platform_dev->dev, "already being remove"
  2888. "d\n");
  2889. return -ENODEV;
  2890. } else {
  2891. int rings = MAX_U132_RINGS;
  2892. int endps = MAX_U132_ENDPS;
  2893. dev_err(&u132->platform_dev->dev, "removing device u132"
  2894. ".%d\n", u132->sequence_num);
  2895. msleep(100);
  2896. mutex_lock(&u132->sw_lock);
  2897. u132_monitor_cancel_work(u132);
  2898. while (rings-- > 0) {
  2899. struct u132_ring *ring = &u132->ring[rings];
  2900. u132_ring_cancel_work(u132, ring);
  2901. } while (endps-- > 0) {
  2902. struct u132_endp *endp = u132->endp[endps];
  2903. if (endp)
  2904. u132_endp_cancel_work(u132, endp);
  2905. }
  2906. u132->going += 1;
  2907. printk(KERN_INFO "removing device u132.%d\n",
  2908. u132->sequence_num);
  2909. mutex_unlock(&u132->sw_lock);
  2910. usb_remove_hcd(hcd);
  2911. u132_u132_put_kref(u132);
  2912. return 0;
  2913. }
  2914. } else
  2915. return 0;
  2916. }
  2917. static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
  2918. {
  2919. int rings = MAX_U132_RINGS;
  2920. int ports = MAX_U132_PORTS;
  2921. int addrs = MAX_U132_ADDRS;
  2922. int udevs = MAX_U132_UDEVS;
  2923. int endps = MAX_U132_ENDPS;
  2924. u132->board = pdev->dev.platform_data;
  2925. u132->platform_dev = pdev;
  2926. u132->power = 0;
  2927. u132->reset = 0;
  2928. mutex_init(&u132->sw_lock);
  2929. mutex_init(&u132->scheduler_lock);
  2930. while (rings-- > 0) {
  2931. struct u132_ring *ring = &u132->ring[rings];
  2932. ring->u132 = u132;
  2933. ring->number = rings + 1;
  2934. ring->length = 0;
  2935. ring->curr_endp = NULL;
  2936. INIT_DELAYED_WORK(&ring->scheduler,
  2937. u132_hcd_ring_work_scheduler);
  2938. }
  2939. mutex_lock(&u132->sw_lock);
  2940. INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
  2941. while (ports-- > 0) {
  2942. struct u132_port *port = &u132->port[ports];
  2943. port->u132 = u132;
  2944. port->reset = 0;
  2945. port->enable = 0;
  2946. port->power = 0;
  2947. port->Status = 0;
  2948. }
  2949. while (addrs-- > 0) {
  2950. struct u132_addr *addr = &u132->addr[addrs];
  2951. addr->address = 0;
  2952. }
  2953. while (udevs-- > 0) {
  2954. struct u132_udev *udev = &u132->udev[udevs];
  2955. int i = ARRAY_SIZE(udev->endp_number_in);
  2956. int o = ARRAY_SIZE(udev->endp_number_out);
  2957. udev->usb_device = NULL;
  2958. udev->udev_number = 0;
  2959. udev->usb_addr = 0;
  2960. udev->portnumber = 0;
  2961. while (i-- > 0)
  2962. udev->endp_number_in[i] = 0;
  2963. while (o-- > 0)
  2964. udev->endp_number_out[o] = 0;
  2965. }
  2966. while (endps-- > 0)
  2967. u132->endp[endps] = NULL;
  2968. mutex_unlock(&u132->sw_lock);
  2969. }
/*
 * Platform-device probe: sanity-check the bridged OHCI registers through
 * the ftdi-elan transport, create and register the usb_hcd, and kick off
 * the monitor work item that polls root-hub state.
 */
static int __devinit u132_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	int retval;
	u32 control;
	u32 rh_a = -1;
	u32 num_ports;
	msleep(100);	/* let the ftdi-elan bridge settle after attach */
	if (u132_exiting > 0)
		return -ENODEV;
	/* mask all OHCI interrupts before touching anything else */
	retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, control, &control);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
	if (retval)
		return retval;
	/* NOTE(review): num_ports is computed but never used here;
	 * u132->num_ports is presumably set elsewhere — verify */
	num_ports = rh_a & RH_A_NDP;	/* refuse to confuse usbcore */
	/* this hcd does no DMA; a dma_mask would mislead usbcore */
	if (pdev->dev.dma_mask)
		return -EINVAL;
	hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		printk(KERN_ERR "failed to create the usb hcd struct for U132\n"
			);
		ftdi_elan_gone_away(pdev);
		return -ENOMEM;
	} else {
		struct u132 *u132 = hcd_to_u132(hcd);
		retval = 0;
		hcd->rsrc_start = 0;
		mutex_lock(&u132_module_lock);
		list_add_tail(&u132->u132_list, &u132_static_list);
		u132->sequence_num = ++u132_instances;
		mutex_unlock(&u132_module_lock);
		u132_u132_init_kref(u132);
		u132_initialise(u132, pdev);
		hcd->product_desc = "ELAN U132 Host Controller";
		/* no irq and no resource flags: status is polled */
		retval = usb_add_hcd(hcd, 0, 0);
		if (retval != 0) {
			dev_err(&u132->platform_dev->dev, "init error %d\n",
				retval);
			u132_u132_put_kref(u132);
			return retval;
		} else {
			u132_monitor_queue_work(u132, 100);
			return 0;
		}
	}
}
  3021. #ifdef CONFIG_PM
  3022. /* for this device there's no useful distinction between the controller
  3023. * and its root hub, except that the root hub only gets direct PM calls
  3024. * when CONFIG_USB_SUSPEND is enabled.
  3025. */
/*
 * Platform suspend callback: a freeze maps onto bus_suspend, while a real
 * suspend/hibernate powers down every port. Other PM events fall through
 * and return 0 unchanged.
 */
static int u132_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
			, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0, ports;
		switch (state.event) {
		case PM_EVENT_FREEZE:
			retval = u132_bus_suspend(hcd);
			break;
		case PM_EVENT_SUSPEND:
		case PM_EVENT_HIBERNATE:
			ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 0);
			}
			break;
		}
		return retval;
	}
}
  3054. static int u132_resume(struct platform_device *pdev)
  3055. {
  3056. struct usb_hcd *hcd = platform_get_drvdata(pdev);
  3057. struct u132 *u132 = hcd_to_u132(hcd);
  3058. if (u132->going > 1) {
  3059. dev_err(&u132->platform_dev->dev, "device has been removed %d\n"
  3060. , u132->going);
  3061. return -ENODEV;
  3062. } else if (u132->going > 0) {
  3063. dev_err(&u132->platform_dev->dev, "device is being removed\n");
  3064. return -ESHUTDOWN;
  3065. } else {
  3066. int retval = 0;
  3067. if (!u132->port[0].power) {
  3068. int ports = MAX_U132_PORTS;
  3069. while (ports-- > 0) {
  3070. port_power(u132, ports, 1);
  3071. }
  3072. retval = 0;
  3073. } else {
  3074. retval = u132_bus_resume(hcd);
  3075. }
  3076. return retval;
  3077. }
  3078. }
  3079. #else
  3080. #define u132_suspend NULL
  3081. #define u132_resume NULL
  3082. #endif
  3083. /*
  3084. * this driver is loaded explicitly by ftdi_u132
  3085. *
  3086. * the platform_driver struct is static because it is per type of module
  3087. */
/* platform driver glue; registered at module init, matched against
 * devices created by the ftdi_u132 module */
static struct platform_driver u132_platform_driver = {
	.probe = u132_probe,
	.remove = __devexit_p(u132_remove),
	.suspend = u132_suspend,
	.resume = u132_resume,
	.driver = {
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
  3098. static int __init u132_hcd_init(void)
  3099. {
  3100. int retval;
  3101. INIT_LIST_HEAD(&u132_static_list);
  3102. u132_instances = 0;
  3103. u132_exiting = 0;
  3104. mutex_init(&u132_module_lock);
  3105. if (usb_disabled())
  3106. return -ENODEV;
  3107. printk(KERN_INFO "driver %s\n", hcd_name);
  3108. workqueue = create_singlethread_workqueue("u132");
  3109. retval = platform_driver_register(&u132_platform_driver);
  3110. return retval;
  3111. }
  3112. module_init(u132_hcd_init);
/*
 * Module exit: raise u132_exiting so probe refuses new devices, tear down
 * every registered platform device (which runs u132_remove), unregister
 * the driver, then wait for the last instance to drop before destroying
 * the shared workqueue.
 */
static void __exit u132_hcd_exit(void)
{
	struct u132 *u132;
	struct u132 *temp;
	mutex_lock(&u132_module_lock);
	u132_exiting += 1;
	mutex_unlock(&u132_module_lock);
	list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
		platform_device_unregister(u132->platform_dev);
	}
	platform_driver_unregister(&u132_platform_driver);
	printk(KERN_INFO "u132-hcd driver deregistered\n");
	/* u132_instances reaches 0 when the final kref is released */
	wait_event(u132_hcd_wait, u132_instances == 0);
	flush_workqueue(workqueue);
	destroy_workqueue(workqueue);
}
module_exit(u132_hcd_exit);
  3130. MODULE_LICENSE("GPL");
  3131. MODULE_ALIAS("platform:u132_hcd");