ehci-msm-hsic.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399
  1. /* ehci-msm-hsic.c - HSUSB Host Controller Driver Implementation
  2. *
  3. * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  4. *
  5. * Partly derived from ehci-fsl.c and ehci-hcd.c
  6. * Copyright (c) 2000-2004 by David Brownell
  7. * Copyright (c) 2005 MontaVista Software
  8. *
  9. * All source code in this file is licensed under the following license except
  10. * where indicated.
  11. *
  12. * This program is free software; you can redistribute it and/or modify it
  13. * under the terms of the GNU General Public License version 2 as published
  14. * by the Free Software Foundation.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  19. *
  20. * See the GNU General Public License for more details.
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, you can find it at http://www.fsf.org
  23. */
  24. #include <linux/platform_device.h>
  25. #include <linux/clk.h>
  26. #include <linux/err.h>
  27. #include <linux/debugfs.h>
  28. #include <linux/seq_file.h>
  29. #include <linux/wakelock.h>
  30. #include <linux/pm_runtime.h>
  31. #include <linux/regulator/consumer.h>
  32. #include <linux/usb/ulpi.h>
  33. #include <linux/usb/msm_hsusb_hw.h>
  34. #include <linux/usb/msm_hsusb.h>
  35. #include <linux/gpio.h>
  36. #include <linux/of_gpio.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/irq.h>
  39. #include <linux/kthread.h>
  40. #include <linux/wait.h>
  41. #include <linux/pm_qos.h>
  42. #include <mach/msm_bus.h>
  43. #include <mach/clk.h>
  44. #include <mach/msm_iomap.h>
  45. #include <mach/msm_xo.h>
  46. #include <linux/spinlock.h>
  47. #include <linux/cpu.h>
  48. #include <mach/rpm-regulator.h>
  49. #include "hbm.c"
  50. #define MSM_USB_BASE (hcd->regs)
  51. #define USB_REG_START_OFFSET 0x90
  52. #define USB_REG_END_OFFSET 0x250
/* Workqueue on which deferred driver work (e.g. bus voting) is run --
 * presumably bus_vote_w; confirm against the probe/irq code below. */
static struct workqueue_struct *ehci_wq;

/*
 * Register layout of the controller's general-purpose timers
 * (GPTIMER0/GPTIMER1), mapped via mehci->timer.
 */
struct ehci_timer {
#define GPT_LD(p)	((p) & 0x00FFFFFF)	/* load value: low 24 bits */
	u32 gptimer0_ld;
#define GPT_RUN		BIT(31)			/* start the timer */
#define GPT_RESET	BIT(30)			/* reset the counter */
#define GPT_MODE	BIT(24)			/* mode select -- TODO confirm one-shot vs repeat against HW spec */
#define GPT_CNT(p)	((p) & 0x00FFFFFF)	/* current count: low 24 bits */
	u32 gptimer0_ctrl;
	u32 gptimer1_ld;
	u32 gptimer1_ctrl;
};
/*
 * Per-controller state for the MSM HSIC EHCI host.
 *
 * Lives in the hcd_priv[] tail of struct usb_hcd; see hcd_to_hsic() /
 * hsic_to_hcd() for the conversions. The embedded ehci_hcd must remain
 * the first member for those casts to be valid.
 */
struct msm_hsic_hcd {
	struct ehci_hcd		ehci;		/* must stay first */
	spinlock_t		wakeup_lock;	/* guards wakeup irq state */
	struct device		*dev;
	/* clock handles; some are optional on certain targets (IS_ERR checks) */
	struct clk		*ahb_clk;
	struct clk		*core_clk;
	struct clk		*alt_core_clk;
	struct clk		*phy_clk;
	struct clk		*cal_clk;
	struct clk		*inactivity_clk;
	struct regulator	*hsic_vddcx;	/* digital supply (see msm_hsic_init_vddcx) */
	struct regulator	*hsic_gdsc;	/* power domain, optional (see msm_hsic_init_gdsc) */
	atomic_t		async_int;	/* async wakeup interrupt pending */
	atomic_t		in_lpm;		/* controller is in low power mode */
	struct wake_lock	wlock;
	int			peripheral_status_irq;
	int			wakeup_irq;
	bool			wakeup_irq_enabled;
	int			async_irq;
	uint32_t		async_int_cnt;	/* async irq counter (debug) */
	atomic_t		pm_usage_cnt;
	uint32_t		bus_perf_client;	/* msm_bus client handle */
	uint32_t		wakeup_int_cnt;	/* wakeup irq counter (debug) */
	enum usb_vdd_type	vdd_type;	/* corner vs voltage voting */
	struct work_struct	bus_vote_w;	/* deferred bus bandwidth vote */
	bool			bus_vote;
	/* gp timer */
	struct ehci_timer __iomem *timer;
	struct completion	gpt0_completion;
	struct completion	rt_completion;
	int			resume_status;
	int			resume_again;
	int			bus_reset;
	int			reset_again;
	struct pm_qos_request	pm_qos_req_dma;
	unsigned		enable_hbm:1;	/* host bus manager support */
};
/* NOTE(review): not static -- file-wide/global symbol; confirm nothing
 * outside this file links against it, otherwise mark it static. */
struct msm_hsic_hcd *__mehci;
static bool debug_bus_voting_enabled = true;
/* backing store for the platform device's DMA mask (32-bit addressing) */
static u64 ehci_msm_hsic_dma_mask = DMA_BIT_MASK(32);
static struct platform_driver ehci_msm_hsic_driver;

/* runtime-tunable debug knobs, exposed as writable module parameters */
static unsigned int enable_payload_log = 1;
module_param(enable_payload_log, uint, S_IRUGO | S_IWUSR);
static unsigned int enable_dbg_log = 1;
module_param(enable_dbg_log, uint, S_IRUGO | S_IWUSR);
/* by default log ep0 and efs sync ep (bitmask: bit n == endpoint n) */
static unsigned int ep_addr_rxdbg_mask = 9;
module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR);
static unsigned int ep_addr_txdbg_mask = 9;
module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR);

/* Maximum debug message length */
#define DBG_MSG_LEN   128UL
/* Maximum number of messages (must stay a power of two; dbg_inc masks with it) */
#define DBG_MAX_MSG   256UL
#define TIME_BUF_LEN  20
#define HEX_DUMP_LEN  72
  121. enum event_type {
  122. EVENT_UNDEF = -1,
  123. URB_SUBMIT,
  124. URB_COMPLETE,
  125. EVENT_NONE,
  126. };
  127. #define EVENT_STR_LEN 5
  128. static enum event_type str_to_event(const char *name)
  129. {
  130. if (!strncasecmp("S", name, EVENT_STR_LEN))
  131. return URB_SUBMIT;
  132. if (!strncasecmp("C", name, EVENT_STR_LEN))
  133. return URB_COMPLETE;
  134. if (!strncasecmp("", name, EVENT_STR_LEN))
  135. return EVENT_NONE;
  136. return EVENT_UNDEF;
  137. }
/*
 * In-memory trace ring for ep0 (control) activity. Each slot is one
 * preformatted text line; idx is the next slot to write, wrapping at
 * DBG_MAX_MSG via dbg_inc(). lck serialises writers against the
 * debugfs readers.
 */
static struct {
	char (buf[DBG_MAX_MSG])[DBG_MSG_LEN];	/* ring of message lines */
	unsigned idx;				/* next write slot */
	rwlock_t lck;				/* protects buf and idx */
} dbg_hsic_ctrl = {
	.idx = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};

/* Same layout as dbg_hsic_ctrl, but for non-control endpoint traffic. */
static struct {
	char (buf[DBG_MAX_MSG])[DBG_MSG_LEN];	/* ring of message lines */
	unsigned idx;				/* next write slot */
	rwlock_t lck;				/* protects buf and idx */
} dbg_hsic_data = {
	.idx = 0,
	.lck = __RW_LOCK_UNLOCKED(lck)
};
  155. /**
  156. * dbg_inc: increments debug event index
  157. * @idx: buffer index
  158. */
  159. static void dbg_inc(unsigned *idx)
  160. {
  161. *idx = (*idx + 1) & (DBG_MAX_MSG-1);
  162. }
/*
 * get_timestamp - render the current CPU clock as "[sssss.uuuuuu] ".
 * @tbuf: caller-supplied buffer of at least TIME_BUF_LEN bytes
 *
 * Returns @tbuf so it can be used inline as a printf argument.
 */
static char *get_timestamp(char *tbuf)
{
	unsigned long long t;
	unsigned long nanosec_rem;

	t = cpu_clock(smp_processor_id());
	/* do_div() leaves whole seconds in t and returns the ns remainder;
	 * the /1000 converts that to microseconds (despite the variable name) */
	nanosec_rem = do_div(t, 1000000000)/1000;
	scnprintf(tbuf, TIME_BUF_LEN, "[%5lu.%06lu] ", (unsigned long)t,
		nanosec_rem);
	return tbuf;
}
  174. static int allow_dbg_log(int ep_addr)
  175. {
  176. int dir, num;
  177. dir = ep_addr & USB_DIR_IN ? USB_DIR_IN : USB_DIR_OUT;
  178. num = ep_addr & ~USB_DIR_IN;
  179. num = 1 << num;
  180. if ((dir == USB_DIR_IN) && (num & ep_addr_rxdbg_mask))
  181. return 1;
  182. if ((dir == USB_DIR_OUT) && (num & ep_addr_txdbg_mask))
  183. return 1;
  184. return 0;
  185. }
/*
 * get_hex_data - format up to 32 bytes of a URB's payload as hex text.
 * @dbuf:   caller buffer of at least HEX_DUMP_LEN bytes
 * @urb:    URB whose transfer buffer is dumped
 * @event:  non-zero for a completion, zero for a submission
 * @status: URB status; -EINPROGRESS is treated as success
 *
 * Only IN completions and OUT submissions carry meaningful payload, so
 * anything else returns a "" string literal (NOT @dbuf) -- callers must
 * treat the return value as read-only.
 */
static char *get_hex_data(char *dbuf, struct urb *urb, int event, int status)
{
	int ep_addr = urb->ep->desc.bEndpointAddress;
	char *ubuf = urb->transfer_buffer;
	/* completions report actual_length; submissions the requested length */
	size_t len = event ?
		urb->actual_length : urb->transfer_buffer_length;

	if (status == -EINPROGRESS)
		status = 0;

	/* only dump epin completions and epout submissions */
	if (len && !status &&
		(((ep_addr & USB_DIR_IN) && event) ||
		 (!(ep_addr & USB_DIR_IN) && !event))) {
		/* cap the dump at 32 bytes so it fits HEX_DUMP_LEN */
		if (len >= 32)
			len = 32;
		hex_dump_to_buffer(ubuf, len, 32, 4, dbuf, HEX_DUMP_LEN, 0);
	} else {
		dbuf = "";
	}

	return dbuf;
}
/*
 * dbg_log_event - append one entry to the in-memory URB trace rings.
 * @urb:   URB being traced, or NULL for a bare text event
 * @event: short tag ("S" submit, "C" complete, or free-form text)
 * @extra: event-specific number (status, length, ...)
 *
 * ep0 traffic (and NULL-URB events) goes to dbg_hsic_ctrl; all other
 * endpoints go to dbg_hsic_data. Each ring is guarded by its own rwlock
 * and entries are preformatted text lines with a timestamp prefix.
 */
static void dbg_log_event(struct urb *urb, char * event, unsigned extra)
{
	unsigned long flags;
	int ep_addr;
	char tbuf[TIME_BUF_LEN];
	char dbuf[HEX_DUMP_LEN];

	if (!enable_dbg_log)
		return;

	/* no URB: log the raw event string into the control ring */
	if (!urb) {
		write_lock_irqsave(&dbg_hsic_ctrl.lck, flags);
		scnprintf(dbg_hsic_ctrl.buf[dbg_hsic_ctrl.idx], DBG_MSG_LEN,
			"%s: %s : %u", get_timestamp(tbuf), event, extra);
		dbg_inc(&dbg_hsic_ctrl.idx);
		write_unlock_irqrestore(&dbg_hsic_ctrl.lck, flags);
		return;
	}

	ep_addr = urb->ep->desc.bEndpointAddress;
	if (!allow_dbg_log(ep_addr))
		return;

	if ((ep_addr & 0x0f) == 0x0) {
		/* ep0 submission: include the decoded 8-byte SETUP packet */
		if (!str_to_event(event)) {
			write_lock_irqsave(&dbg_hsic_ctrl.lck, flags);
			scnprintf(dbg_hsic_ctrl.buf[dbg_hsic_ctrl.idx],
				DBG_MSG_LEN, "%s: [%s : %pK]:[%s] "
				"%02x %02x %04x %04x %04x %u %d",
				get_timestamp(tbuf), event, urb,
				(ep_addr & USB_DIR_IN) ? "in" : "out",
				urb->setup_packet[0], urb->setup_packet[1],
				(urb->setup_packet[3] << 8) |
				urb->setup_packet[2],
				(urb->setup_packet[5] << 8) |
				urb->setup_packet[4],
				(urb->setup_packet[7] << 8) |
				urb->setup_packet[6],
				urb->transfer_buffer_length, extra);
			dbg_inc(&dbg_hsic_ctrl.idx);
			write_unlock_irqrestore(&dbg_hsic_ctrl.lck, flags);
		} else {
			/* ep0 completion (or other tag): log actual length */
			write_lock_irqsave(&dbg_hsic_ctrl.lck, flags);
			scnprintf(dbg_hsic_ctrl.buf[dbg_hsic_ctrl.idx],
				DBG_MSG_LEN, "%s: [%s : %pK]:[%s] %u %d",
				get_timestamp(tbuf), event, urb,
				(ep_addr & USB_DIR_IN) ? "in" : "out",
				urb->actual_length, extra);
			dbg_inc(&dbg_hsic_ctrl.idx);
			write_unlock_irqrestore(&dbg_hsic_ctrl.lck, flags);
		}
	} else {
		/* non-control endpoint: optionally append a hex payload dump */
		write_lock_irqsave(&dbg_hsic_data.lck, flags);
		scnprintf(dbg_hsic_data.buf[dbg_hsic_data.idx], DBG_MSG_LEN,
			"%s: [%s : %pK]:ep%d[%s] %u %d %s",
			get_timestamp(tbuf), event, urb, ep_addr & 0x0f,
			(ep_addr & USB_DIR_IN) ? "in" : "out",
			str_to_event(event) ? urb->actual_length :
			urb->transfer_buffer_length, extra,
			enable_payload_log ? get_hex_data(dbuf, urb,
			str_to_event(event), extra) : "");
		dbg_inc(&dbg_hsic_data.idx);
		write_unlock_irqrestore(&dbg_hsic_data.lck, flags);
	}
}
/* Get the msm_hsic_hcd that lives in a usb_hcd's hcd_priv tail area. */
static inline struct msm_hsic_hcd *hcd_to_hsic(struct usb_hcd *hcd)
{
	return (struct msm_hsic_hcd *) (hcd->hcd_priv);
}
/*
 * Inverse of hcd_to_hsic(): recover the owning usb_hcd. The HSIC state
 * occupies the hcd_priv[] flexible tail of struct usb_hcd, so
 * container_of() on hcd_priv yields the outer structure.
 */
static inline struct usb_hcd *hsic_to_hcd(struct msm_hsic_hcd *mehci)
{
	return container_of((void *) mehci, struct usb_hcd, hcd_priv);
}
  276. static void dump_hsic_regs(struct usb_hcd *hcd)
  277. {
  278. int i;
  279. struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
  280. if (atomic_read(&mehci->in_lpm))
  281. return;
  282. for (i = USB_REG_START_OFFSET; i <= USB_REG_END_OFFSET; i += 0x10)
  283. pr_info("%pK: %08x\t%08x\t%08x\t%08x\n", hcd->regs + i,
  284. readl_relaxed(hcd->regs + i),
  285. readl_relaxed(hcd->regs + i + 4),
  286. readl_relaxed(hcd->regs + i + 8),
  287. readl_relaxed(hcd->regs + i + 0xc));
  288. }
#define ULPI_IO_TIMEOUT_USEC	(10 * 1000)
#define USB_PHY_VDD_DIG_VOL_NONE	0	/* uV */
#define USB_PHY_VDD_DIG_VOL_MIN	945000	/* uV */
#define USB_PHY_VDD_DIG_VOL_MAX	1320000	/* uV */
#define HSIC_DBG1_REG	0x38

/*
 * VDDCX vote table indexed as [vdd_type][level]. Row 0 holds RPM corner
 * votes, row 1 raw microvolt values; entries may be overridden by the
 * "hsic,vdd-voltage-level" DT property in msm_hsic_init_vddcx().
 */
static int vdd_val[VDD_TYPE_MAX][VDD_VAL_MAX] = {
	{ /* VDD_CX CORNER Voting */
		[VDD_NONE] = RPM_VREG_CORNER_NONE,
		[VDD_MIN] = RPM_VREG_CORNER_NOMINAL,
		[VDD_MAX] = RPM_VREG_CORNER_HIGH,
	},
	{ /* VDD_CX Voltage Voting */
		[VDD_NONE] = USB_PHY_VDD_DIG_VOL_NONE,
		[VDD_MIN] = USB_PHY_VDD_DIG_VOL_MIN,
		[VDD_MAX] = USB_PHY_VDD_DIG_VOL_MAX,
	},
};
  306. static int msm_hsic_init_vddcx(struct msm_hsic_hcd *mehci, int init)
  307. {
  308. int ret = 0;
  309. int none_vol, min_vol, max_vol;
  310. u32 tmp[3];
  311. int len = 0;
  312. if (!mehci->hsic_vddcx) {
  313. mehci->vdd_type = VDDCX_CORNER;
  314. mehci->hsic_vddcx = devm_regulator_get(mehci->dev,
  315. "hsic_vdd_dig");
  316. if (IS_ERR(mehci->hsic_vddcx)) {
  317. mehci->hsic_vddcx = devm_regulator_get(mehci->dev,
  318. "HSIC_VDDCX");
  319. if (IS_ERR(mehci->hsic_vddcx)) {
  320. dev_err(mehci->dev, "unable to get hsic vddcx\n");
  321. return PTR_ERR(mehci->hsic_vddcx);
  322. }
  323. mehci->vdd_type = VDDCX;
  324. }
  325. if (mehci->dev->of_node) {
  326. of_get_property(mehci->dev->of_node,
  327. "hsic,vdd-voltage-level",
  328. &len);
  329. if (len == sizeof(tmp)) {
  330. of_property_read_u32_array(mehci->dev->of_node,
  331. "hsic,vdd-voltage-level",
  332. tmp, len/sizeof(*tmp));
  333. vdd_val[mehci->vdd_type][VDD_NONE] = tmp[0];
  334. vdd_val[mehci->vdd_type][VDD_MIN] = tmp[1];
  335. vdd_val[mehci->vdd_type][VDD_MAX] = tmp[2];
  336. } else {
  337. dev_dbg(mehci->dev, "Use default vdd config\n");
  338. }
  339. }
  340. }
  341. none_vol = vdd_val[mehci->vdd_type][VDD_NONE];
  342. min_vol = vdd_val[mehci->vdd_type][VDD_MIN];
  343. max_vol = vdd_val[mehci->vdd_type][VDD_MAX];
  344. if (!init)
  345. goto disable_reg;
  346. ret = regulator_set_voltage(mehci->hsic_vddcx, min_vol, max_vol);
  347. if (ret) {
  348. dev_err(mehci->dev, "unable to set the voltage"
  349. "for hsic vddcx\n");
  350. return ret;
  351. }
  352. ret = regulator_enable(mehci->hsic_vddcx);
  353. if (ret) {
  354. dev_err(mehci->dev, "unable to enable hsic vddcx\n");
  355. goto reg_enable_err;
  356. }
  357. return 0;
  358. disable_reg:
  359. regulator_disable(mehci->hsic_vddcx);
  360. reg_enable_err:
  361. regulator_set_voltage(mehci->hsic_vddcx, none_vol, max_vol);
  362. return ret;
  363. }
  364. /* Global Distributed Switch Controller (GDSC) init */
  365. static int msm_hsic_init_gdsc(struct msm_hsic_hcd *mehci, int init)
  366. {
  367. int ret = 0;
  368. if (IS_ERR(mehci->hsic_gdsc))
  369. return 0;
  370. if (!mehci->hsic_gdsc) {
  371. mehci->hsic_gdsc = devm_regulator_get(mehci->dev,
  372. "HSIC_GDSC");
  373. if (IS_ERR(mehci->hsic_gdsc))
  374. return 0;
  375. }
  376. if (init) {
  377. ret = regulator_enable(mehci->hsic_gdsc);
  378. if (ret) {
  379. dev_err(mehci->dev, "unable to enable hsic gdsc\n");
  380. return ret;
  381. }
  382. } else {
  383. regulator_disable(mehci->hsic_gdsc);
  384. }
  385. return 0;
  386. }
/*
 * ulpi_read - read one PHY register through the ULPI viewport.
 * @mehci: HSIC controller context
 * @reg:   ULPI register address
 *
 * Busy-waits up to ULPI_IO_TIMEOUT_USEC for the controller to clear
 * ULPI_RUN. Returns the register value, or -ETIMEDOUT after dumping
 * diagnostic state.
 */
static int __maybe_unused ulpi_read(struct msm_hsic_hcd *mehci, u32 reg)
{
	struct usb_hcd *hcd = hsic_to_hcd(mehci);
	int cnt = 0;

	/* initiate read operation */
	writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg),
	       USB_ULPI_VIEWPORT);

	/* wait for completion */
	while (cnt < ULPI_IO_TIMEOUT_USEC) {
		if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
			break;
		udelay(1);
		cnt++;
	}

	if (cnt >= ULPI_IO_TIMEOUT_USEC) {
		dev_err(mehci->dev, "ulpi_read: timeout ULPI_VIEWPORT: %08x\n",
				readl_relaxed(USB_ULPI_VIEWPORT));
		dev_err(mehci->dev, "PORTSC: %08x USBCMD: %08x FRINDEX: %08x\n",
				readl_relaxed(USB_PORTSC),
				readl_relaxed(USB_USBCMD),
				readl_relaxed(USB_FRINDEX));
		/* frame counter increments after 125us; re-read FRINDEX to
		 * show whether the link is still alive */
		udelay(130);
		dev_err(mehci->dev, "ulpi_read: FRINDEX: %08x\n",
				readl_relaxed(USB_FRINDEX));
		return -ETIMEDOUT;
	}

	return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT));
}
  416. static int ulpi_write(struct msm_hsic_hcd *mehci, u32 val, u32 reg)
  417. {
  418. struct usb_hcd *hcd = hsic_to_hcd(mehci);
  419. int cnt = 0;
  420. /* initiate write operation */
  421. writel_relaxed(ULPI_RUN | ULPI_WRITE |
  422. ULPI_ADDR(reg) | ULPI_DATA(val),
  423. USB_ULPI_VIEWPORT);
  424. /* wait for completion */
  425. while (cnt < ULPI_IO_TIMEOUT_USEC) {
  426. if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN))
  427. break;
  428. udelay(1);
  429. cnt++;
  430. }
  431. if (cnt >= ULPI_IO_TIMEOUT_USEC) {
  432. dev_err(mehci->dev, "ulpi_write: timeout ULPI_VIEWPORT: %08x\n",
  433. readl_relaxed(USB_ULPI_VIEWPORT));
  434. dev_err(mehci->dev, "PORTSC: %08x USBCMD: %08x FRINDEX: %08x\n",
  435. readl_relaxed(USB_PORTSC),
  436. readl_relaxed(USB_USBCMD),
  437. readl_relaxed(USB_FRINDEX));
  438. /*frame counter increments afte 125us*/
  439. udelay(130);
  440. dev_err(mehci->dev, "ulpi_write: FRINDEX: %08x\n",
  441. readl_relaxed(USB_FRINDEX));
  442. return -ETIMEDOUT;
  443. }
  444. return 0;
  445. }
  446. static int msm_hsic_config_gpios(struct msm_hsic_hcd *mehci, int gpio_en)
  447. {
  448. int rc = 0;
  449. struct msm_hsic_host_platform_data *pdata;
  450. static int gpio_status;
  451. pdata = mehci->dev->platform_data;
  452. if (!pdata || !pdata->strobe || !pdata->data)
  453. return rc;
  454. if (gpio_status == gpio_en)
  455. return 0;
  456. gpio_status = gpio_en;
  457. if (!gpio_en)
  458. goto free_gpio;
  459. rc = gpio_request(pdata->strobe, "HSIC_STROBE_GPIO");
  460. if (rc < 0) {
  461. dev_err(mehci->dev, "gpio request failed for HSIC STROBE\n");
  462. return rc;
  463. }
  464. rc = gpio_request(pdata->data, "HSIC_DATA_GPIO");
  465. if (rc < 0) {
  466. dev_err(mehci->dev, "gpio request failed for HSIC DATA\n");
  467. goto free_strobe;
  468. }
  469. return 0;
  470. free_gpio:
  471. gpio_free(pdata->data);
  472. free_strobe:
  473. gpio_free(pdata->strobe);
  474. return rc;
  475. }
/*
 * msm_hsic_clk_reset - block-reset the HSIC core.
 *
 * Two schemes exist depending on the target: with alt_core_clk present,
 * only core_clk is reset (synchronous scheme); otherwise every HSIC clock
 * is gated around an asynchronous block reset. The delays and the exact
 * ordering are hardware requirements -- do not reorder.
 */
static void msm_hsic_clk_reset(struct msm_hsic_hcd *mehci)
{
	int ret;

	/* alt_core_clk exists in targets that do not use asynchronous reset */
	if (!IS_ERR(mehci->alt_core_clk)) {
		ret = clk_reset(mehci->core_clk, CLK_RESET_ASSERT);
		if (ret) {
			dev_err(mehci->dev, "hsic clk assert failed:%d\n", ret);
			return;
		}
		/* due to a hw bug, turn off the clock before completing reset */
		clk_disable(mehci->core_clk);

		ret = clk_reset(mehci->core_clk, CLK_RESET_DEASSERT);
		if (ret)
			dev_err(mehci->dev, "hsic clk deassert failed:%d\n",
					ret);

		usleep_range(10000, 12000);

		clk_enable(mehci->core_clk);
	} else {
		/* Using asynchronous block reset to the hardware */
		clk_disable_unprepare(mehci->core_clk);
		clk_disable_unprepare(mehci->phy_clk);
		clk_disable_unprepare(mehci->cal_clk);
		clk_disable_unprepare(mehci->ahb_clk);
		if (!IS_ERR(mehci->inactivity_clk))
			clk_disable_unprepare(mehci->inactivity_clk);

		ret = clk_reset(mehci->core_clk, CLK_RESET_ASSERT);
		if (ret) {
			dev_err(mehci->dev, "hsic clk assert failed:%d\n", ret);
			return;
		}
		usleep_range(10000, 12000);

		ret = clk_reset(mehci->core_clk, CLK_RESET_DEASSERT);
		if (ret)
			dev_err(mehci->dev, "hsic clk deassert failed:%d\n",
					ret);
		/*
		 * Required delay between the deassertion and
		 * clock enablement.
		 */
		ndelay(200);

		clk_prepare_enable(mehci->core_clk);
		clk_prepare_enable(mehci->phy_clk);
		clk_prepare_enable(mehci->cal_clk);
		clk_prepare_enable(mehci->ahb_clk);
		if (!IS_ERR(mehci->inactivity_clk))
			clk_prepare_enable(mehci->inactivity_clk);
	}
}
  525. #define HSIC_STROBE_GPIO_PAD_CTL (MSM_TLMM_BASE+0x20C0)
  526. #define HSIC_DATA_GPIO_PAD_CTL (MSM_TLMM_BASE+0x20C4)
  527. #define HSIC_CAL_PAD_CTL (MSM_TLMM_BASE+0x20C8)
  528. #define HSIC_LV_MODE 0x04
  529. #define HSIC_PAD_CALIBRATION 0xA8
  530. #define HSIC_GPIO_PAD_VAL 0x0A0AAA10
  531. #define LINK_RESET_TIMEOUT_USEC (250 * 1000)
/*
 * msm_hsic_phy_reset - reset the HSIC core clocks and reselect the ULPI PHY.
 *
 * Note: @hcd looks unused, but the USB_PORTSC register address expands
 * through the MSM_USB_BASE macro, which references hcd->regs.
 */
static void msm_hsic_phy_reset(struct msm_hsic_hcd *mehci)
{
	struct usb_hcd *hcd = hsic_to_hcd(mehci);

	msm_hsic_clk_reset(mehci);

	/* select ulpi phy */
	writel_relaxed(0x80000000, USB_PORTSC);
	/* make sure the PHY-select write completes before proceeding */
	mb();
}
/*
 * msm_hsic_start - HSIC PHY pad configuration and calibration sequence.
 *
 * Two init paths: when strobe/data are routed via GPIOs the TLMM pads are
 * configured and periodic IO calibration is enabled; otherwise the
 * dedicated-I/O sequence programs connect-signaling length and enables
 * HSIC mode directly. Also claims the optional resume GPIO.
 *
 * Returns 0 on success or the msm_hsic_config_gpios() error.
 */
static int msm_hsic_start(struct msm_hsic_hcd *mehci)
{
	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
	int ret;
	void __iomem *reg;

	if (pdata && pdata->resume_gpio) {
		ret = gpio_request(pdata->resume_gpio, "HSIC_RESUME_GPIO");
		if (ret < 0) {
			dev_err(mehci->dev,
				"gpio req failed for hsic resume:%d\n", ret);
			/* treat the resume gpio as absent from here on */
			pdata->resume_gpio = 0;
		}
	}

	/* HSIC init sequence when HSIC signals (Strobe/Data) are
	   routed via GPIOs */
	if (pdata && pdata->strobe && pdata->data) {
		if (!pdata->ignore_cal_pad_config) {
			/* Enable LV_MODE in HSIC_CAL_PAD_CTL register */
			writel_relaxed(HSIC_LV_MODE, HSIC_CAL_PAD_CTL);
			mb();
		}

		/* set periodic calibration interval to ~2.048sec in
		   HSIC_IO_CAL_REG */
		ulpi_write(mehci, 0xFF, 0x33);

		/* Enable periodic IO calibration in HSIC_CFG register */
		ulpi_write(mehci, HSIC_PAD_CALIBRATION, 0x30);

		/* Configure GPIO pins for HSIC functionality mode */
		ret = msm_hsic_config_gpios(mehci, 1);
		if (ret) {
			dev_err(mehci->dev, " gpio configuarion failed\n");
			goto free_resume_gpio;
		}
		if (pdata->strobe_pad_offset) {
			/* Set CORE_CTL_EN in STROBE GPIO PAD_CTL register */
			reg = MSM_TLMM_BASE + pdata->strobe_pad_offset;
			writel_relaxed(readl_relaxed(reg) | 0x2000000, reg);
		} else {
			/* Set LV_MODE=0x1 and DCC=0x2 in STROBE GPIO PAD_CTL */
			reg = HSIC_STROBE_GPIO_PAD_CTL;
			writel_relaxed(HSIC_GPIO_PAD_VAL, reg);
		}
		if (pdata->data_pad_offset) {
			/* Set CORE_CTL_EN in HSIC_DATA GPIO PAD_CTL register */
			reg = MSM_TLMM_BASE + pdata->data_pad_offset;
			writel_relaxed(readl_relaxed(reg) | 0x2000000, reg);
		} else {
			/* Set LV_MODE=0x1 and DCC=0x2 in the DATA GPIO
			 * PAD_CTL (original comment said STROBE here) */
			reg = HSIC_DATA_GPIO_PAD_CTL;
			writel_relaxed(HSIC_GPIO_PAD_VAL, reg);
		}

		/* ensure the pad writes land before enabling HSIC mode */
		mb();

		/* Enable HSIC mode in HSIC_CFG register */
		ulpi_write(mehci, 0x01, 0x31);
	} else {
		/* HSIC init sequence when HSIC signals (Strobe/Data) are
		   routed via dedicated I/O */

		/* programmable length of connect signaling (33.2ns) */
		ret = ulpi_write(mehci, 3, HSIC_DBG1_REG);
		if (ret) {
			pr_err("%s: Unable to program length of connect "
			      "signaling\n", __func__);
		}

		/* set periodic calibration interval to ~2.048sec in
		   HSIC_IO_CAL_REG */
		ulpi_write(mehci, 0xFF, 0x33);

		/* Enable HSIC mode in HSIC_CFG register */
		ulpi_write(mehci, 0xA9, 0x30);
	}

	/* disable auto resume */
	ulpi_write(mehci, ULPI_IFC_CTRL_AUTORESUME, ULPI_CLR(ULPI_IFC_CTRL));

	return 0;

free_resume_gpio:
	if (pdata && pdata->resume_gpio)
		gpio_free(pdata->resume_gpio);

	return ret;
}
  616. #define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
  617. #define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
  618. #ifdef CONFIG_PM_SLEEP
/*
 * msm_hsic_reset - full PHY block reset followed by re-initialisation.
 *
 * Returns the result of msm_hsic_start() (0 on success).
 */
static int msm_hsic_reset(struct msm_hsic_hcd *mehci)
{
	/* reset HSIC phy */
	msm_hsic_phy_reset(mehci);

	/* HSIC init procedure (calibration) */
	return msm_hsic_start(mehci);
}
  626. static int msm_hsic_suspend(struct msm_hsic_hcd *mehci)
  627. {
  628. struct usb_hcd *hcd = hsic_to_hcd(mehci);
  629. int cnt = 0, ret;
  630. u32 val;
  631. int none_vol, max_vol;
  632. struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
  633. if (atomic_read(&mehci->in_lpm)) {
  634. dev_dbg(mehci->dev, "%s called in lpm\n", __func__);
  635. return 0;
  636. }
  637. disable_irq(hcd->irq);
  638. /* make sure we don't race against a remote wakeup */
  639. if (test_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags) ||
  640. readl_relaxed(USB_PORTSC) & PORT_RESUME) {
  641. dev_dbg(mehci->dev, "wakeup pending, aborting suspend\n");
  642. enable_irq(hcd->irq);
  643. return -EBUSY;
  644. }
  645. if (pdata->consider_ipa_handshake) {
  646. dev_dbg(mehci->dev, "%s:Wait for resources release\n",
  647. __func__);
  648. if (!msm_bam_hsic_lpm_ok()) {
  649. dev_dbg(mehci->dev, "%s:Prod+Cons not released\n",
  650. __func__);
  651. enable_irq(hcd->irq);
  652. return -EBUSY;
  653. }
  654. dev_dbg(mehci->dev, "%s:Prod+Cons resources released\n",
  655. __func__);
  656. }
  657. /*
  658. * PHY may take some time or even fail to enter into low power
  659. * mode (LPM). Hence poll for 500 msec and reset the PHY and link
  660. * in failure case.
  661. */
  662. val = readl_relaxed(USB_PORTSC);
  663. val &= ~PORT_RWC_BITS;
  664. val |= PORTSC_PHCD;
  665. writel_relaxed(val, USB_PORTSC);
  666. while (cnt < PHY_SUSPEND_TIMEOUT_USEC) {
  667. if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD)
  668. break;
  669. udelay(1);
  670. cnt++;
  671. }
  672. if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) {
  673. dev_err(mehci->dev, "Unable to suspend PHY\n");
  674. msm_hsic_config_gpios(mehci, 0);
  675. msm_hsic_reset(mehci);
  676. }
  677. /*
  678. * PHY has capability to generate interrupt asynchronously in low
  679. * power mode (LPM). This interrupt is level triggered. So USB IRQ
  680. * line must be disabled till async interrupt enable bit is cleared
  681. * in USBCMD register. Assert STP (ULPI interface STOP signal) to
  682. * block data communication from PHY. Enable asynchronous interrupt
  683. * only when wakeup gpio IRQ is not present.
  684. */
  685. if (mehci->wakeup_irq)
  686. writel_relaxed(readl_relaxed(USB_USBCMD) |
  687. ULPI_STP_CTRL, USB_USBCMD);
  688. else
  689. writel_relaxed(readl_relaxed(USB_USBCMD) | ASYNC_INTR_CTRL |
  690. ULPI_STP_CTRL, USB_USBCMD);
  691. /*
  692. * Ensure that hardware is put in low power mode before
  693. * clocks are turned OFF and VDD is allowed to minimize.
  694. */
  695. mb();
  696. clk_disable_unprepare(mehci->core_clk);
  697. clk_disable_unprepare(mehci->phy_clk);
  698. clk_disable_unprepare(mehci->cal_clk);
  699. clk_disable_unprepare(mehci->ahb_clk);
  700. if (!IS_ERR(mehci->inactivity_clk))
  701. clk_disable_unprepare(mehci->inactivity_clk);
  702. none_vol = vdd_val[mehci->vdd_type][VDD_NONE];
  703. max_vol = vdd_val[mehci->vdd_type][VDD_MAX];
  704. ret = regulator_set_voltage(mehci->hsic_vddcx, none_vol, max_vol);
  705. if (ret < 0)
  706. dev_err(mehci->dev, "unable to set vddcx voltage for VDD MIN\n");
  707. if (mehci->bus_perf_client && debug_bus_voting_enabled) {
  708. mehci->bus_vote = false;
  709. queue_work(ehci_wq, &mehci->bus_vote_w);
  710. }
  711. atomic_set(&mehci->in_lpm, 1);
  712. enable_irq(hcd->irq);
  713. if (mehci->wakeup_irq) {
  714. mehci->wakeup_irq_enabled = 1;
  715. enable_irq_wake(mehci->wakeup_irq);
  716. enable_irq(mehci->wakeup_irq);
  717. }
  718. if (pdata && pdata->standalone_latency)
  719. pm_qos_update_request(&mehci->pm_qos_req_dma,
  720. PM_QOS_DEFAULT_VALUE);
  721. wake_unlock(&mehci->wlock);
  722. dev_info(mehci->dev, "HSIC-USB in low power mode\n");
  723. return 0;
  724. }
  725. static int msm_hsic_resume(struct msm_hsic_hcd *mehci)
  726. {
  727. struct usb_hcd *hcd = hsic_to_hcd(mehci);
  728. int cnt = 0, ret;
  729. unsigned temp;
  730. int min_vol, max_vol;
  731. unsigned long flags;
  732. struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
  733. if (!atomic_read(&mehci->in_lpm)) {
  734. dev_dbg(mehci->dev, "%s called in !in_lpm\n", __func__);
  735. return 0;
  736. }
  737. if (pdata->consider_ipa_handshake) {
  738. dev_dbg(mehci->dev, "%s:Wait for producer resource\n",
  739. __func__);
  740. msm_bam_wait_for_hsic_prod_granted();
  741. dev_dbg(mehci->dev, "%s:Producer resource obtained\n",
  742. __func__);
  743. }
  744. /* Handles race with Async interrupt */
  745. disable_irq(hcd->irq);
  746. if (pdata && pdata->standalone_latency)
  747. pm_qos_update_request(&mehci->pm_qos_req_dma,
  748. pdata->standalone_latency + 1);
  749. if (mehci->wakeup_irq) {
  750. spin_lock_irqsave(&mehci->wakeup_lock, flags);
  751. if (mehci->wakeup_irq_enabled) {
  752. disable_irq_wake(mehci->wakeup_irq);
  753. disable_irq_nosync(mehci->wakeup_irq);
  754. mehci->wakeup_irq_enabled = 0;
  755. }
  756. spin_unlock_irqrestore(&mehci->wakeup_lock, flags);
  757. }
  758. wake_lock(&mehci->wlock);
  759. if (mehci->bus_perf_client && debug_bus_voting_enabled) {
  760. mehci->bus_vote = true;
  761. queue_work(ehci_wq, &mehci->bus_vote_w);
  762. }
  763. min_vol = vdd_val[mehci->vdd_type][VDD_MIN];
  764. max_vol = vdd_val[mehci->vdd_type][VDD_MAX];
  765. ret = regulator_set_voltage(mehci->hsic_vddcx, min_vol, max_vol);
  766. if (ret < 0)
  767. dev_err(mehci->dev, "unable to set nominal vddcx voltage (no VDD MIN)\n");
  768. clk_prepare_enable(mehci->core_clk);
  769. clk_prepare_enable(mehci->phy_clk);
  770. clk_prepare_enable(mehci->cal_clk);
  771. clk_prepare_enable(mehci->ahb_clk);
  772. if (!IS_ERR(mehci->inactivity_clk))
  773. clk_prepare_enable(mehci->inactivity_clk);
  774. temp = readl_relaxed(USB_USBCMD);
  775. temp &= ~ASYNC_INTR_CTRL;
  776. temp &= ~ULPI_STP_CTRL;
  777. writel_relaxed(temp, USB_USBCMD);
  778. if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD))
  779. goto skip_phy_resume;
  780. temp = readl_relaxed(USB_PORTSC);
  781. temp &= ~(PORT_RWC_BITS | PORTSC_PHCD);
  782. writel_relaxed(temp, USB_PORTSC);
  783. while (cnt < PHY_RESUME_TIMEOUT_USEC) {
  784. if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD) &&
  785. (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_SYNC_STATE))
  786. break;
  787. udelay(1);
  788. cnt++;
  789. }
  790. if (cnt >= PHY_RESUME_TIMEOUT_USEC) {
  791. /*
  792. * This is a fatal error. Reset the link and
  793. * PHY to make hsic working.
  794. */
  795. dev_err(mehci->dev, "Unable to resume USB. Reset the hsic\n");
  796. msm_hsic_config_gpios(mehci, 0);
  797. msm_hsic_reset(mehci);
  798. }
  799. skip_phy_resume:
  800. usb_hcd_resume_root_hub(hcd);
  801. atomic_set(&mehci->in_lpm, 0);
  802. if (atomic_read(&mehci->async_int)) {
  803. atomic_set(&mehci->async_int, 0);
  804. pm_runtime_put_noidle(mehci->dev);
  805. enable_irq(hcd->irq);
  806. }
  807. if (atomic_read(&mehci->pm_usage_cnt)) {
  808. atomic_set(&mehci->pm_usage_cnt, 0);
  809. pm_runtime_put_noidle(mehci->dev);
  810. }
  811. enable_irq(hcd->irq);
  812. dev_info(mehci->dev, "HSIC-USB exited from low power mode\n");
  813. if (pdata->consider_ipa_handshake) {
  814. dev_dbg(mehci->dev, "%s:Notify usb bam on resume complete\n",
  815. __func__);
  816. msm_bam_hsic_notify_on_resume();
  817. }
  818. return 0;
  819. }
  820. #endif
  821. static void ehci_hsic_bus_vote_w(struct work_struct *w)
  822. {
  823. struct msm_hsic_hcd *mehci =
  824. container_of(w, struct msm_hsic_hcd, bus_vote_w);
  825. int ret;
  826. ret = msm_bus_scale_client_update_request(mehci->bus_perf_client,
  827. mehci->bus_vote);
  828. if (ret)
  829. dev_err(mehci->dev, "%s: Failed to vote for bus bandwidth %d\n",
  830. __func__, ret);
  831. }
/*
 * msm_hsic_reset_done() - finish a port reset: clear PORT_RESET, wait for
 * the bit to deassert (1 ms budget), then set CMD_RUN so SOFs start
 * immediately. Returns the handshake() result (0 on success).
 */
static int msm_hsic_reset_done(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	u32 __iomem *status_reg = &ehci->regs->port_status[0];
	int ret;

	/* clear PORT_RESET without touching write-to-clear status bits */
	ehci_writel(ehci, ehci_readl(ehci, status_reg) & ~(PORT_RWC_BITS |
					PORT_RESET), status_reg);

	ret = handshake(ehci, status_reg, PORT_RESET, 0, 1 * 1000);

	if (ret)
		pr_err("reset handshake failed in %s\n", __func__);
	else
		/* start the controller so SOFs go out right after reset */
		ehci_writel(ehci, ehci_readl(ehci, &ehci->regs->command) |
				CMD_RUN, &ehci->regs->command);

	return ret;
}
#define STS_GPTIMER0_INTERRUPT	BIT(24)

/*
 * msm_hsic_irq() - HSIC interrupt handler.
 *
 * While in LPM this is the PHY's asynchronous wake interrupt: kick runtime
 * resume and, if resume is not already active/in progress, keep the IRQ
 * masked (it is level triggered) and latch async_int for msm_hsic_resume()
 * to rebalance. Otherwise handle the GPTIMER0 expiry used by the bus
 * reset/resume SOF-bug workarounds, then defer to the normal EHCI ISR.
 */
static irqreturn_t msm_hsic_irq(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
	u32 status;
	int ret;

	if (atomic_read(&mehci->in_lpm)) {
		dev_dbg(mehci->dev, "phy async intr\n");
		dbg_log_event(NULL, "Async IRQ", 0);
		ret = pm_runtime_get(mehci->dev);
		/* 1 == already active, -EINPROGRESS == resume under way */
		if ((ret == 1) || (ret == -EINPROGRESS)) {
			pm_runtime_put_noidle(mehci->dev);
		} else {
			/* level-triggered: mask until resume re-enables */
			disable_irq_nosync(hcd->irq);
			atomic_set(&mehci->async_int, 1);
		}

		return IRQ_HANDLED;
	}

	status = ehci_readl(ehci, &ehci->regs->status);

	if (status & STS_GPTIMER0_INTERRUPT) {
		int timeleft;

		dbg_log_event(NULL, "FPR: gpt0_isr", mehci->bus_reset);

		/* gptimer1 still running => we are within the SOF window */
		timeleft = GPT_CNT(ehci_readl(ehci,
						&mehci->timer->gptimer1_ctrl));
		if (timeleft) {
			if (mehci->bus_reset) {
				ret = msm_hsic_reset_done(hcd);
				if (ret) {
					mehci->reset_again = 1;
					dbg_log_event(NULL, "RESET: fail", 0);
				}
			} else {
				/* restart SOFs immediately after resume */
				ehci_writel(ehci, ehci_readl(ehci,
					&ehci->regs->command) | CMD_RUN,
					&ehci->regs->command);
			}
		} else {
			/* window missed: ask the waiter to retry */
			if (mehci->bus_reset)
				mehci->reset_again = 1;
			else
				mehci->resume_again = 1;
		}

		dbg_log_event(NULL, "FPR: timeleft", timeleft);

		complete(&mehci->gpt0_completion);
		/* ack the GPTIMER0 status bit (write-to-clear) */
		ehci_writel(ehci, STS_GPTIMER0_INTERRUPT, &ehci->regs->status);
	}

	return ehci_irq(hcd);
}
/*
 * ehci_hsic_reset() - hc_driver .reset: map EHCI cap/op registers, run the
 * generic halt/init/reset sequence, then apply MSM-specific AHB and mode
 * configuration. Returns 0 on success or the failing EHCI core error.
 *
 * NOTE(review): pdata is dereferenced without a NULL check here, unlike
 * several other sites in this file — presumably probe guarantees platform
 * data by this point; confirm against the probe path.
 */
static int ehci_hsic_reset(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
	int retval;

	mehci->timer = USB_HS_GPTIMER_BASE;
	ehci->caps = USB_CAPLENGTH;
	/* operational registers start right after the capability block */
	ehci->regs = USB_CAPLENGTH +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache the data to minimize the chip reads*/
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	hcd->has_tt = 1;
	ehci->sbrn = HCD_USB2;

	retval = ehci_halt(ehci);
	if (retval)
		return retval;

	/* data structure init */
	retval = ehci_init(hcd);
	if (retval)
		return retval;

	retval = ehci_reset(ehci);
	if (retval)
		return retval;

	/* bursts of unspecified length. */
	writel_relaxed(0, USB_AHBBURST);

	/* Use the AHB transactor and configure async bridge bypass */
#define MSM_USB_ASYNC_BRIDGE_BYPASS BIT(31)
	if (pdata->ahb_async_bridge_bypass)
		writel_relaxed(0x08 | MSM_USB_ASYNC_BRIDGE_BYPASS, USB_AHBMODE);
	else
		writel_relaxed(0x08, USB_AHBMODE);

	/* Disable streaming mode and select host mode */
	writel_relaxed(0x13, USB_USBMODE);

	ehci_port_power(ehci, 1);
	return 0;
}
#ifdef CONFIG_PM
#define RESET_RETRY_LIMIT 3
#define RESET_SIGNAL_TIME_SOF_USEC (50 * 1000)
#define RESET_SIGNAL_TIME_USEC (20 * 1000)

/*
 * ehci_hsic_reset_sof_bug_handler() - drive a port reset while working
 * around the controller's SOF bug.
 *
 * Halts the controller, then drives the reset signal timed by GPTIMER0
 * (20 ms) with GPTIMER1 (50 ms) bounding the window in which SOFs must
 * restart; the ISR (msm_hsic_irq) completes gpt0_completion and sets
 * reset_again on failure. After RESET_RETRY_LIMIT attempts it falls back
 * to completing the reset in a tight busy-wait loop under the lock.
 * @val is the PORTSC value to write to start the reset signalling.
 */
static void ehci_hsic_reset_sof_bug_handler(struct usb_hcd *hcd, u32 val)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
	u32 __iomem *status_reg = &ehci->regs->port_status[0];
	u32 cmd;
	unsigned long flags;
	int retries = 0, ret, cnt = RESET_SIGNAL_TIME_USEC;
	s32 next_latency = 0;

	/* tighten wakeup latency for the duration; restore on exit */
	if (pdata && pdata->swfi_latency) {
		next_latency = pdata->swfi_latency + 1;
		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
		if (pdata->standalone_latency)
			next_latency = pdata->standalone_latency + 1;
		else
			next_latency = PM_QOS_DEFAULT_VALUE;
	}

	mehci->bus_reset = 1;

	/* Halt the controller */
	cmd = ehci_readl(ehci, &ehci->regs->command);
	cmd &= ~CMD_RUN;
	ehci_writel(ehci, cmd, &ehci->regs->command);
	ret = handshake(ehci, &ehci->regs->status, STS_HALT,
			STS_HALT, 16 * 125);
	if (ret) {
		pr_err("halt handshake fatal error\n");
		dbg_log_event(NULL, "HALT: fatal", 0);
		goto fail;
	}

retry:
	retries++;
	dbg_log_event(NULL, "RESET: start", retries);
	pr_debug("reset begin %d\n", retries);
	mehci->reset_again = 0;
	spin_lock_irqsave(&ehci->lock, flags);
	/* start the reset signalling, timed by GPTIMER0 */
	ehci_writel(ehci, val, status_reg);
	ehci_writel(ehci, GPT_LD(RESET_SIGNAL_TIME_USEC - 1),
			&mehci->timer->gptimer0_ld);
	ehci_writel(ehci, GPT_RESET | GPT_RUN,
			&mehci->timer->gptimer0_ctrl);
	ehci_writel(ehci, INTR_MASK | STS_GPTIMER0_INTERRUPT,
			&ehci->regs->intr_enable);

	/* GPTIMER1 bounds the window in which SOFs must restart */
	ehci_writel(ehci, GPT_LD(RESET_SIGNAL_TIME_SOF_USEC - 1),
			&mehci->timer->gptimer1_ld);
	ehci_writel(ehci, GPT_RESET | GPT_RUN,
			&mehci->timer->gptimer1_ctrl);
	spin_unlock_irqrestore(&ehci->lock, flags);

	/* the ISR completes this and sets reset_again on failure */
	wait_for_completion(&mehci->gpt0_completion);

	if (!mehci->reset_again)
		goto done;

	if (handshake(ehci, status_reg, PORT_RESET, 0, 10 * 1000)) {
		pr_err("reset handshake fatal error\n");
		dbg_log_event(NULL, "RESET: fatal", retries);
		goto fail;
	}

	if (retries < RESET_RETRY_LIMIT)
		goto retry;

	/* complete reset in tight loop */
	pr_info("RESET in tight loop\n");
	dbg_log_event(NULL, "RESET: tight", 0);

	spin_lock_irqsave(&ehci->lock, flags);
	ehci_writel(ehci, val, status_reg);
	/* busy-wait the full reset signal time with interrupts off */
	while (cnt--)
		udelay(1);
	ret = msm_hsic_reset_done(hcd);
	spin_unlock_irqrestore(&ehci->lock, flags);
	if (ret) {
		pr_err("RESET in tight loop failed\n");
		dbg_log_event(NULL, "RESET: tight failed", 0);
		goto fail;
	}

done:
	dbg_log_event(NULL, "RESET: done", retries);
	pr_debug("reset completed\n");
fail:
	mehci->bus_reset = 0;
	if (next_latency)
		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
}
  1019. static int ehci_hsic_bus_suspend(struct usb_hcd *hcd)
  1020. {
  1021. struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
  1022. if (!(readl_relaxed(USB_PORTSC) & PORT_PE)) {
  1023. dbg_log_event(NULL, "RH suspend attempt failed", 0);
  1024. dev_dbg(mehci->dev, "%s:port is not enabled skip suspend\n",
  1025. __func__);
  1026. return -EAGAIN;
  1027. }
  1028. dbg_log_event(NULL, "Suspend RH", 0);
  1029. return ehci_bus_suspend(hcd);
  1030. }
#define RESUME_RETRY_LIMIT 3
#define RESUME_SIGNAL_TIME_USEC (21 * 1000)
#define RESUME_SIGNAL_TIME_SOF_USEC (23 * 1000)

/*
 * msm_hsic_resume_thread() - kthread body that performs the timed root-hub
 * resume sequence for controllers with the resume-SOF hardware bug.
 *
 * Drives PORT_RESUME with GPTIMER0/GPTIMER1 timing the signal and the
 * SOF-restart window (see the long comment below); retries up to
 * RESUME_RETRY_LIMIT times, then falls back to a tight polling loop.
 * Result is reported via mehci->resume_status (1 = success, <0 = error)
 * and rt_completion. Always returns 0 (kthread exit code).
 */
static int msm_hsic_resume_thread(void *data)
{
	struct msm_hsic_hcd *mehci = data;
	struct usb_hcd *hcd = hsic_to_hcd(mehci);
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	u32 temp;
	unsigned long resume_needed = 0;
	int retry_cnt = 0;
	int tight_resume = 0;
	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
	s32 next_latency = 0;

	dbg_log_event(NULL, "Resume RH", 0);

	/* tighten wakeup latency for the duration; restore on exit */
	if (pdata && pdata->swfi_latency) {
		next_latency = pdata->swfi_latency + 1;
		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);
		if (pdata->standalone_latency)
			next_latency = pdata->standalone_latency + 1;
		else
			next_latency = PM_QOS_DEFAULT_VALUE;
	}

	/* keep delay between bus states */
	if (time_before(jiffies, ehci->next_statechange))
		usleep_range(5000, 5000);

	spin_lock_irq(&ehci->lock);
	if (!HCD_HW_ACCESSIBLE(hcd)) {
		mehci->resume_status = -ESHUTDOWN;
		goto exit;
	}

	if (unlikely(ehci->debug)) {
		if (!dbgp_reset_prep())
			ehci->debug = NULL;
		else
			dbgp_external_startup();
	}

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);

	/* re-init operational registers */
	ehci_writel(ehci, 0, &ehci->regs->segment);
	ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
	ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);

	/*CMD_RUN will be set after, PORT_RESUME gets cleared*/
	if (ehci->resume_sof_bug)
		ehci->command &= ~CMD_RUN;

	/* restore CMD_RUN, framelist size, and irq threshold */
	ehci_writel(ehci, ehci->command, &ehci->regs->command);

	/* manually resume the ports we suspended during bus_suspend() */
resume_again:
	if (retry_cnt >= RESUME_RETRY_LIMIT) {
		pr_info("retry count(%d) reached max, resume in tight loop\n",
			retry_cnt);
		tight_resume = 1;
	}

	temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
	temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
	if (test_bit(0, &ehci->bus_suspended) && (temp & PORT_SUSPEND)) {
		temp |= PORT_RESUME;
		set_bit(0, &resume_needed);
	}
	dbg_log_event(NULL, "FPR: Set", temp);
	ehci_writel(ehci, temp, &ehci->regs->port_status[0]);

	/* HSIC controller has a h/w bug due to which it can try to send SOFs
	 * (start of frames) during port resume resulting in phy lockup. HSIC hw
	 * controller in MSM clears FPR bit after driving the resume signal for
	 * 20ms. Workaround is to stop SOFs before driving resume and then start
	 * sending SOFs immediately. Need to send SOFs within 3ms of resume
	 * completion otherwise peripheral may enter undefined state. As
	 * usleep_range does not gurantee exact sleep time, GPTimer is used to
	 * to time the resume sequence. If driver exceeds allowable time SOFs,
	 * repeat the resume process.
	 */
	if (ehci->resume_sof_bug && resume_needed) {
		if (!tight_resume) {
			mehci->resume_again = 0;
			/* GPTIMER0 times the resume signal itself */
			ehci_writel(ehci, GPT_LD(RESUME_SIGNAL_TIME_USEC - 1),
					&mehci->timer->gptimer0_ld);
			ehci_writel(ehci, GPT_RESET | GPT_RUN,
					&mehci->timer->gptimer0_ctrl);
			ehci_writel(ehci, INTR_MASK | STS_GPTIMER0_INTERRUPT,
					&ehci->regs->intr_enable);

			/* GPTIMER1 bounds the SOF-restart window */
			ehci_writel(ehci, GPT_LD(
					RESUME_SIGNAL_TIME_SOF_USEC - 1),
					&mehci->timer->gptimer1_ld);
			ehci_writel(ehci, GPT_RESET | GPT_RUN,
					&mehci->timer->gptimer1_ctrl);

			/* ISR completes this; may set resume_again */
			spin_unlock_irq(&ehci->lock);
			wait_for_completion(&mehci->gpt0_completion);
			spin_lock_irq(&ehci->lock);
		} else {
			dbg_log_event(NULL, "FPR: Tightloop", 0);
			/* do the resume in a tight loop */
			handshake(ehci, &ehci->regs->port_status[0],
				PORT_RESUME, 0, 22 * 1000);
			ehci_writel(ehci, ehci_readl(ehci,
				&ehci->regs->command) | CMD_RUN,
				&ehci->regs->command);
		}

		if (mehci->resume_again) {
			int temp;

			dbg_log_event(NULL, "FPR: Re-Resume", retry_cnt);
			pr_info("FPR: retry count: %d\n", retry_cnt);
			spin_unlock_irq(&ehci->lock);
			/* re-suspend the port before retrying the resume */
			temp = ehci_readl(ehci, &ehci->regs->port_status[0]);
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_SUSPEND;
			ehci_writel(ehci, temp, &ehci->regs->port_status[0]);
			/* Keep the bus idle for 5ms so that peripheral
			 * can detect and initiate suspend
			 */
			usleep_range(5000, 5000);
			dbg_log_event(NULL,
				"FPR: RResume",
				ehci_readl(ehci, &ehci->regs->port_status[0]));
			spin_lock_irq(&ehci->lock);
			mehci->resume_again = 0;
			retry_cnt++;
			goto resume_again;
		}
	}

	dbg_log_event(NULL, "FPR: RT-Done", 0);
	mehci->resume_status = 1;
exit:
	spin_unlock_irq(&ehci->lock);
	complete(&mehci->rt_completion);
	if (next_latency)
		pm_qos_update_request(&mehci->pm_qos_req_dma, next_latency);

	return 0;
}
  1163. static int ehci_hsic_bus_resume(struct usb_hcd *hcd)
  1164. {
  1165. struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
  1166. struct ehci_hcd *ehci = hcd_to_ehci(hcd);
  1167. u32 temp;
  1168. struct task_struct *resume_thread = NULL;
  1169. struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;
  1170. if (pdata->resume_gpio)
  1171. gpio_direction_output(pdata->resume_gpio, 1);
  1172. if (!mehci->ehci.resume_sof_bug) {
  1173. ehci_bus_resume(hcd);
  1174. } else {
  1175. mehci->resume_status = 0;
  1176. resume_thread = kthread_run(msm_hsic_resume_thread,
  1177. mehci, "hsic_resume_thread");
  1178. if (IS_ERR(resume_thread)) {
  1179. pr_err("Error creating resume thread:%lu\n",
  1180. PTR_ERR(resume_thread));
  1181. return PTR_ERR(resume_thread);
  1182. }
  1183. wait_for_completion(&mehci->rt_completion);
  1184. if (mehci->resume_status < 0)
  1185. return mehci->resume_status;
  1186. dbg_log_event(NULL, "FPR: Wokeup", 0);
  1187. spin_lock_irq(&ehci->lock);
  1188. (void) ehci_readl(ehci, &ehci->regs->command);
  1189. temp = 0;
  1190. if (ehci->async->qh_next.qh)
  1191. temp |= CMD_ASE;
  1192. if (ehci->periodic_sched)
  1193. temp |= CMD_PSE;
  1194. if (temp) {
  1195. ehci->command |= temp;
  1196. ehci_writel(ehci, ehci->command, &ehci->regs->command);
  1197. }
  1198. ehci->next_statechange = jiffies + msecs_to_jiffies(5);
  1199. hcd->state = HC_STATE_RUNNING;
  1200. ehci->rh_state = EHCI_RH_RUNNING;
  1201. ehci->command |= CMD_RUN;
  1202. /* Now we can safely re-enable irqs */
  1203. ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable);
  1204. spin_unlock_irq(&ehci->lock);
  1205. }
  1206. if (pdata->resume_gpio)
  1207. gpio_direction_output(pdata->resume_gpio, 0);
  1208. return 0;
  1209. }
  1210. #else
  1211. #define ehci_hsic_bus_suspend NULL
  1212. #define ehci_hsic_bus_resume NULL
  1213. #endif /* CONFIG_PM */
  1214. static void ehci_msm_set_autosuspend_delay(struct usb_device *dev)
  1215. {
  1216. if (!dev->parent) /*for root hub no delay*/
  1217. pm_runtime_set_autosuspend_delay(&dev->dev, 0);
  1218. else
  1219. pm_runtime_set_autosuspend_delay(&dev->dev, 200);
  1220. }
  1221. static int ehci_msm_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
  1222. gfp_t mem_flags)
  1223. {
  1224. struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
  1225. struct usb_host_bam_type *usb_host_bam =
  1226. (struct usb_host_bam_type *)urb->priv_data;
  1227. if (usb_host_bam && mehci && mehci->enable_hbm)
  1228. return hbm_urb_enqueue(hcd, urb, mem_flags);
  1229. return ehci_urb_enqueue(hcd, urb, mem_flags);
  1230. }
/* hc_driver for the MSM HSIC controller: mostly the generic EHCI ops with
 * MSM-specific IRQ, reset, bus PM, and debug-logging hooks plugged in. */
static struct hc_driver msm_hsic_driver = {
	.description		= hcd_name,
	.product_desc		= "Qualcomm EHCI Host Controller using HSIC",
	.hcd_priv_size		= sizeof(struct msm_hsic_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq			= msm_hsic_irq,
	.flags			= HCD_USB2 | HCD_MEMORY | HCD_RT_OLD_ENUM,

	.reset			= ehci_hsic_reset,
	.start			= ehci_run,

	.stop			= ehci_stop,
	.shutdown		= ehci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue		= ehci_msm_urb_enqueue,
	.urb_dequeue		= ehci_urb_dequeue,
	.endpoint_disable	= ehci_endpoint_disable,
	.endpoint_reset		= ehci_endpoint_reset,
	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,

	/*
	 * scheduling support
	 */
	.get_frame_number	= ehci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data	= ehci_hub_status_data,
	.hub_control		= ehci_hub_control,
	.relinquish_port	= ehci_relinquish_port,
	.port_handed_over	= ehci_port_handed_over,

	/*
	 * PM support
	 */
	.bus_suspend		= ehci_hsic_bus_suspend,
	.bus_resume		= ehci_hsic_bus_resume,

	.log_urb		= dbg_log_event,
	.dump_regs		= dump_hsic_regs,

	.set_autosuspend_delay	= ehci_msm_set_autosuspend_delay,
	.reset_sof_bug_handler	= ehci_hsic_reset_sof_bug_handler,
};
/*
 * msm_hsic_init_clocks() - acquire and enable (init != 0) or disable and
 * release (init == 0) all clocks used by the controller.
 *
 * alt_core_clk and inactivity_clk are optional; failure to get them is
 * only logged. On the release path clocks are left enabled if the device
 * is already in LPM (msm_hsic_suspend disabled them). Returns 0 on
 * success or a clk_get() error code, unwinding via the goto chain.
 */
static int msm_hsic_init_clocks(struct msm_hsic_hcd *mehci, u32 init)
{
	int ret = 0;

	if (!init)
		goto put_clocks;

	/*core_clk is required for LINK protocol engine
	 *clock rate appropriately set by target specific clock driver */
	mehci->core_clk = clk_get(mehci->dev, "core_clk");
	if (IS_ERR(mehci->core_clk)) {
		dev_err(mehci->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mehci->core_clk);
		return ret;
	}

	/* alt_core_clk is for LINK to be used during PHY RESET in
	 * targets on which link does NOT use asynchronous reset methodology.
	 * clock rate appropriately set by target specific clock driver */
	mehci->alt_core_clk = clk_get(mehci->dev, "alt_core_clk");
	if (IS_ERR(mehci->alt_core_clk))
		dev_dbg(mehci->dev, "failed to get alt_core_clk\n");

	/* phy_clk is required for HSIC PHY operation
	 * clock rate appropriately set by target specific clock driver */
	mehci->phy_clk = clk_get(mehci->dev, "phy_clk");
	if (IS_ERR(mehci->phy_clk)) {
		dev_err(mehci->dev, "failed to get phy_clk\n");
		ret = PTR_ERR(mehci->phy_clk);
		goto put_alt_core_clk;
	}

	/* 10MHz cal_clk is required for calibration of I/O pads */
	mehci->cal_clk = clk_get(mehci->dev, "cal_clk");
	if (IS_ERR(mehci->cal_clk)) {
		dev_err(mehci->dev, "failed to get cal_clk\n");
		ret = PTR_ERR(mehci->cal_clk);
		goto put_phy_clk;
	}

	/* ahb_clk is required for data transfers */
	mehci->ahb_clk = clk_get(mehci->dev, "iface_clk");
	if (IS_ERR(mehci->ahb_clk)) {
		dev_err(mehci->dev, "failed to get iface_clk\n");
		ret = PTR_ERR(mehci->ahb_clk);
		goto put_cal_clk;
	}

	/*
	 * Inactivity_clk is required for hsic bam inactivity timer.
	 * This clock is not compulsory and is defined in clock lookup
	 * only for targets that need to use the inactivity timer feature.
	 */
	mehci->inactivity_clk = clk_get(mehci->dev, "inactivity_clk");
	if (IS_ERR(mehci->inactivity_clk))
		dev_dbg(mehci->dev, "failed to get inactivity_clk\n");

	clk_prepare_enable(mehci->core_clk);
	clk_prepare_enable(mehci->phy_clk);
	clk_prepare_enable(mehci->cal_clk);
	clk_prepare_enable(mehci->ahb_clk);
	if (!IS_ERR(mehci->inactivity_clk))
		clk_prepare_enable(mehci->inactivity_clk);

	return 0;

put_clocks:
	/* in LPM the clocks are already disabled — don't unbalance them */
	if (!atomic_read(&mehci->in_lpm)) {
		clk_disable_unprepare(mehci->core_clk);
		clk_disable_unprepare(mehci->phy_clk);
		clk_disable_unprepare(mehci->cal_clk);
		clk_disable_unprepare(mehci->ahb_clk);
		if (!IS_ERR(mehci->inactivity_clk))
			clk_disable_unprepare(mehci->inactivity_clk);
	}
	if (!IS_ERR(mehci->inactivity_clk))
		clk_put(mehci->inactivity_clk);
	clk_put(mehci->ahb_clk);
put_cal_clk:
	clk_put(mehci->cal_clk);
put_phy_clk:
	clk_put(mehci->phy_clk);
put_alt_core_clk:
	if (!IS_ERR(mehci->alt_core_clk))
		clk_put(mehci->alt_core_clk);
	clk_put(mehci->core_clk);

	return ret;
}
  1351. static irqreturn_t hsic_peripheral_status_change(int irq, void *dev_id)
  1352. {
  1353. struct msm_hsic_hcd *mehci = dev_id;
  1354. pr_debug("%s: mehci:%pK dev_id:%pK\n", __func__, mehci, dev_id);
  1355. if (mehci)
  1356. msm_hsic_config_gpios(mehci, 0);
  1357. return IRQ_HANDLED;
  1358. }
/*
 * msm_hsic_wakeup_irq() - remote-wakeup IRQ handler (shared between the
 * wakeup GPIO IRQ and the async IRQ; distinguished by irq number).
 *
 * Bumps the relevant counter, takes the wakelock, disarms the (level
 * triggered) wakeup IRQ, and kicks runtime resume. If resume is neither
 * active nor in progress, the usage count taken by pm_runtime_get() is
 * remembered in pm_usage_cnt for msm_hsic_resume() to drop later.
 */
static irqreturn_t msm_hsic_wakeup_irq(int irq, void *data)
{
	struct msm_hsic_hcd *mehci = data;
	int ret;

	if (irq == mehci->async_irq) {
		mehci->async_int_cnt++;
		dbg_log_event(NULL, "Remote Wakeup (ASYNC) IRQ",
				mehci->async_int_cnt);
	} else {
		mehci->wakeup_int_cnt++;
		dbg_log_event(NULL, "Remote Wakeup IRQ", mehci->wakeup_int_cnt);
	}
	dev_dbg(mehci->dev, "%s: hsic remote wakeup interrupt %d cnt: %u, %u\n",
			__func__, irq, mehci->wakeup_int_cnt,
			mehci->async_int_cnt);

	wake_lock(&mehci->wlock);

	/* disarm: this IRQ is level triggered and would otherwise storm */
	if (mehci->wakeup_irq) {
		spin_lock(&mehci->wakeup_lock);
		if (mehci->wakeup_irq_enabled) {
			mehci->wakeup_irq_enabled = 0;
			disable_irq_wake(irq);
			disable_irq_nosync(irq);
		}
		spin_unlock(&mehci->wakeup_lock);
	}

	if (!atomic_read(&mehci->pm_usage_cnt)) {
		ret = pm_runtime_get(mehci->dev);
		/*
		 * HSIC runtime resume can race with us.
		 * if we are active (ret == 1) or resuming
		 * (ret == -EINPROGRESS), decrement the
		 * PM usage counter before returning.
		 */
		if ((ret == 1) || (ret == -EINPROGRESS))
			pm_runtime_put_noidle(mehci->dev);
		else
			atomic_set(&mehci->pm_usage_cnt, 1);
	}

	return IRQ_HANDLED;
}
  1398. static int ehci_hsic_msm_bus_show(struct seq_file *s, void *unused)
  1399. {
  1400. if (debug_bus_voting_enabled)
  1401. seq_printf(s, "enabled\n");
  1402. else
  1403. seq_printf(s, "disabled\n");
  1404. return 0;
  1405. }
/* debugfs open hook: bind the seq_file show callback for "bus_voting" */
static int ehci_hsic_msm_bus_open(struct inode *inode, struct file *file)
{
	return single_open(file, ehci_hsic_msm_bus_show, inode->i_private);
}
  1410. static ssize_t ehci_hsic_msm_bus_write(struct file *file,
  1411. const char __user *ubuf, size_t count, loff_t *ppos)
  1412. {
  1413. char buf[8];
  1414. int ret;
  1415. struct seq_file *s = file->private_data;
  1416. struct msm_hsic_hcd *mehci = s->private;
  1417. memset(buf, 0x00, sizeof(buf));
  1418. if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
  1419. return -EFAULT;
  1420. if (!strncmp(buf, "enable", 6)) {
  1421. /* Do not vote here. Let hsic driver decide when to vote */
  1422. debug_bus_voting_enabled = true;
  1423. } else {
  1424. debug_bus_voting_enabled = false;
  1425. if (mehci->bus_perf_client) {
  1426. ret = msm_bus_scale_client_update_request(
  1427. mehci->bus_perf_client, 0);
  1428. if (ret)
  1429. dev_err(mehci->dev, "%s: Failed to devote "
  1430. "for bus bw %d\n", __func__, ret);
  1431. }
  1432. }
  1433. return count;
  1434. }
/* file_operations for the read/write debugfs "bus_voting" entry */
const struct file_operations ehci_hsic_msm_bus_fops = {
	.open = ehci_hsic_msm_bus_open,
	.read = seq_read,
	.write = ehci_hsic_msm_bus_write,
	.llseek = seq_lseek,
	.release = single_release,
};
/* debugfs "wakeup_cnt" read: print the remote-wakeup interrupt count */
static int ehci_hsic_msm_wakeup_cnt_show(struct seq_file *s, void *unused)
{
	struct msm_hsic_hcd *mehci = s->private;

	seq_printf(s, "%u\n", mehci->wakeup_int_cnt);

	return 0;
}
/* debugfs open hook: bind the seq_file show callback for "wakeup_cnt" */
static int ehci_hsic_msm_wakeup_cnt_open(struct inode *inode, struct file *f)
{
	return single_open(f, ehci_hsic_msm_wakeup_cnt_show, inode->i_private);
}
/* file_operations for the read-only debugfs "wakeup_cnt" entry */
const struct file_operations ehci_hsic_msm_wakeup_cnt_fops = {
	.open = ehci_hsic_msm_wakeup_cnt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * debugfs "show_data_events" read: dump the data-URB debug ring buffer,
 * oldest entry first, starting one slot past the write index. Empty
 * slots are skipped. The ring is read under its reader lock.
 */
static int ehci_hsic_msm_data_events_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	unsigned i;

	read_lock_irqsave(&dbg_hsic_data.lck, flags);

	i = dbg_hsic_data.idx;
	/* walk the ring from the slot after the write index back around */
	for (dbg_inc(&i); i != dbg_hsic_data.idx; dbg_inc(&i)) {
		if (!strnlen(dbg_hsic_data.buf[i], DBG_MSG_LEN))
			continue;
		seq_printf(s, "%s\n", dbg_hsic_data.buf[i]);
	}

	read_unlock_irqrestore(&dbg_hsic_data.lck, flags);

	return 0;
}
/* debugfs open hook: bind the seq_file show callback for data events */
static int ehci_hsic_msm_data_events_open(struct inode *inode, struct file *f)
{
	return single_open(f, ehci_hsic_msm_data_events_show, inode->i_private);
}
/* file_operations for the read-only debugfs "show_data_events" entry */
const struct file_operations ehci_hsic_msm_dbg_data_fops = {
	.open = ehci_hsic_msm_data_events_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * debugfs "show_ctrl_events" read: dump the control-URB debug ring
 * buffer, oldest entry first (same ring-walk scheme as the data-events
 * dump), under its reader lock.
 */
static int ehci_hsic_msm_ctrl_events_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	unsigned i;

	read_lock_irqsave(&dbg_hsic_ctrl.lck, flags);

	i = dbg_hsic_ctrl.idx;
	/* walk the ring from the slot after the write index back around */
	for (dbg_inc(&i); i != dbg_hsic_ctrl.idx; dbg_inc(&i)) {
		if (!strnlen(dbg_hsic_ctrl.buf[i], DBG_MSG_LEN))
			continue;
		seq_printf(s, "%s\n", dbg_hsic_ctrl.buf[i]);
	}

	read_unlock_irqrestore(&dbg_hsic_ctrl.lck, flags);

	return 0;
}
/* Bind the ctrl-events show routine to the mehci stashed in i_private. */
static int ehci_hsic_msm_ctrl_events_open(struct inode *inode, struct file *f)
{
	return single_open(f, ehci_hsic_msm_ctrl_events_show, inode->i_private);
}
/* debugfs file ops: read-only seq_file dump of the ctrl-event ring. */
const struct file_operations ehci_hsic_msm_dbg_ctrl_fops = {
	.open = ehci_hsic_msm_ctrl_events_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Root of this driver's debugfs tree ("ehci_hsic_msm_dbg"); removed on cleanup. */
static struct dentry *ehci_hsic_msm_dbg_root;
  1507. static int ehci_hsic_msm_debugfs_init(struct msm_hsic_hcd *mehci)
  1508. {
  1509. struct dentry *ehci_hsic_msm_dentry;
  1510. ehci_hsic_msm_dbg_root = debugfs_create_dir("ehci_hsic_msm_dbg", NULL);
  1511. if (!ehci_hsic_msm_dbg_root || IS_ERR(ehci_hsic_msm_dbg_root))
  1512. return -ENODEV;
  1513. ehci_hsic_msm_dentry = debugfs_create_file("bus_voting",
  1514. S_IRUGO | S_IWUSR,
  1515. ehci_hsic_msm_dbg_root, mehci,
  1516. &ehci_hsic_msm_bus_fops);
  1517. if (!ehci_hsic_msm_dentry) {
  1518. debugfs_remove_recursive(ehci_hsic_msm_dbg_root);
  1519. return -ENODEV;
  1520. }
  1521. ehci_hsic_msm_dentry = debugfs_create_file("wakeup_cnt",
  1522. S_IRUGO,
  1523. ehci_hsic_msm_dbg_root, mehci,
  1524. &ehci_hsic_msm_wakeup_cnt_fops);
  1525. if (!ehci_hsic_msm_dentry) {
  1526. debugfs_remove_recursive(ehci_hsic_msm_dbg_root);
  1527. return -ENODEV;
  1528. }
  1529. ehci_hsic_msm_dentry = debugfs_create_file("show_ctrl_events",
  1530. S_IRUGO,
  1531. ehci_hsic_msm_dbg_root, mehci,
  1532. &ehci_hsic_msm_dbg_ctrl_fops);
  1533. if (!ehci_hsic_msm_dentry) {
  1534. debugfs_remove_recursive(ehci_hsic_msm_dbg_root);
  1535. return -ENODEV;
  1536. }
  1537. ehci_hsic_msm_dentry = debugfs_create_file("show_data_events",
  1538. S_IRUGO,
  1539. ehci_hsic_msm_dbg_root, mehci,
  1540. &ehci_hsic_msm_dbg_data_fops);
  1541. if (!ehci_hsic_msm_dentry) {
  1542. debugfs_remove_recursive(ehci_hsic_msm_dbg_root);
  1543. return -ENODEV;
  1544. }
  1545. return 0;
  1546. }
/* Tear down the entire debugfs tree created by ehci_hsic_msm_debugfs_init(). */
static void ehci_hsic_msm_debugfs_cleanup(void)
{
	debugfs_remove_recursive(ehci_hsic_msm_dbg_root);
}
  1551. struct msm_hsic_host_platform_data *msm_hsic_dt_to_pdata(
  1552. struct platform_device *pdev)
  1553. {
  1554. struct device_node *node = pdev->dev.of_node;
  1555. struct msm_hsic_host_platform_data *pdata;
  1556. int res_gpio;
  1557. pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
  1558. if (!pdata) {
  1559. dev_err(&pdev->dev, "unable to allocate platform data\n");
  1560. return NULL;
  1561. }
  1562. res_gpio = of_get_named_gpio(node, "hsic,strobe-gpio", 0);
  1563. if (res_gpio < 0)
  1564. res_gpio = 0;
  1565. pdata->strobe = res_gpio;
  1566. res_gpio = of_get_named_gpio(node, "hsic,data-gpio", 0);
  1567. if (res_gpio < 0)
  1568. res_gpio = 0;
  1569. pdata->data = res_gpio;
  1570. res_gpio = of_get_named_gpio(node, "hsic,resume-gpio", 0);
  1571. if (res_gpio < 0)
  1572. res_gpio = 0;
  1573. pdata->resume_gpio = res_gpio;
  1574. pdata->phy_sof_workaround = of_property_read_bool(node,
  1575. "qcom,phy-sof-workaround");
  1576. pdata->phy_susp_sof_workaround = of_property_read_bool(node,
  1577. "qcom,phy-susp-sof-workaround");
  1578. pdata->ignore_cal_pad_config = of_property_read_bool(node,
  1579. "hsic,ignore-cal-pad-config");
  1580. of_property_read_u32(node, "hsic,strobe-pad-offset",
  1581. &pdata->strobe_pad_offset);
  1582. of_property_read_u32(node, "hsic,data-pad-offset",
  1583. &pdata->data_pad_offset);
  1584. of_property_read_u32(node, "hsic,reset-delay",
  1585. &pdata->reset_delay);
  1586. of_property_read_u32(node, "hsic,log2-itc",
  1587. &pdata->log2_irq_thresh);
  1588. if (pdata->log2_irq_thresh > 6)
  1589. pdata->log2_irq_thresh = 0;
  1590. pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
  1591. pdata->pool_64_bit_align = of_property_read_bool(node,
  1592. "qcom,pool-64-bit-align");
  1593. pdata->enable_hbm = of_property_read_bool(node,
  1594. "qcom,enable-hbm");
  1595. pdata->disable_park_mode = (of_property_read_bool(node,
  1596. "qcom,disable-park-mode"));
  1597. pdata->consider_ipa_handshake = (of_property_read_bool(node,
  1598. "hsic,consider-ipa-handshake"));
  1599. pdata->ahb_async_bridge_bypass = of_property_read_bool(node,
  1600. "qcom,ahb-async-bridge-bypass");
  1601. pdata->disable_cerr = of_property_read_bool(node,
  1602. "hsic,disable-cerr");
  1603. return pdata;
  1604. }
  1605. static int __devinit ehci_hsic_msm_probe(struct platform_device *pdev)
  1606. {
  1607. struct usb_hcd *hcd;
  1608. struct resource *res;
  1609. struct msm_hsic_hcd *mehci;
  1610. struct msm_hsic_host_platform_data *pdata;
  1611. unsigned long wakeup_irq_flags = 0;
  1612. int ret;
  1613. dev_dbg(&pdev->dev, "ehci_msm-hsic probe\n");
  1614. if (pdev->dev.of_node) {
  1615. dev_dbg(&pdev->dev, "device tree enabled\n");
  1616. pdev->dev.platform_data = msm_hsic_dt_to_pdata(pdev);
  1617. } else {
  1618. /* explicitly pass wakeup_irq flag for !DT */
  1619. wakeup_irq_flags = IRQF_TRIGGER_HIGH;
  1620. }
  1621. if (!pdev->dev.platform_data)
  1622. dev_dbg(&pdev->dev, "No platform data given\n");
  1623. if (!pdev->dev.dma_mask)
  1624. pdev->dev.dma_mask = &ehci_msm_hsic_dma_mask;
  1625. if (!pdev->dev.coherent_dma_mask)
  1626. pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
  1627. /* After parent device's probe is executed, it will be put in suspend
  1628. * mode. When child device's probe is called, driver core is not
  1629. * resuming parent device due to which parent will be in suspend even
  1630. * though child is active. Hence resume the parent device explicitly.
  1631. */
  1632. if (pdev->dev.parent)
  1633. pm_runtime_get_sync(pdev->dev.parent);
  1634. hcd = usb_create_hcd(&msm_hsic_driver, &pdev->dev,
  1635. dev_name(&pdev->dev));
  1636. if (!hcd) {
  1637. dev_err(&pdev->dev, "Unable to create HCD\n");
  1638. return -ENOMEM;
  1639. }
  1640. hcd_to_bus(hcd)->skip_resume = true;
  1641. hcd->irq = platform_get_irq(pdev, 0);
  1642. if (hcd->irq < 0) {
  1643. dev_err(&pdev->dev, "Unable to get IRQ resource\n");
  1644. ret = hcd->irq;
  1645. goto put_hcd;
  1646. }
  1647. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  1648. if (!res) {
  1649. dev_err(&pdev->dev, "Unable to get memory resource\n");
  1650. ret = -ENODEV;
  1651. goto put_hcd;
  1652. }
  1653. hcd->rsrc_start = res->start;
  1654. hcd->rsrc_len = resource_size(res);
  1655. hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
  1656. if (!hcd->regs) {
  1657. dev_err(&pdev->dev, "ioremap failed\n");
  1658. ret = -ENOMEM;
  1659. goto put_hcd;
  1660. }
  1661. mehci = hcd_to_hsic(hcd);
  1662. mehci->dev = &pdev->dev;
  1663. pdata = mehci->dev->platform_data;
  1664. spin_lock_init(&mehci->wakeup_lock);
  1665. if (pdata->phy_sof_workaround) {
  1666. /* Enable ALL workarounds related to PHY SOF bugs */
  1667. mehci->ehci.susp_sof_bug = 1;
  1668. mehci->ehci.reset_sof_bug = 1;
  1669. mehci->ehci.resume_sof_bug = 1;
  1670. } else if (pdata->phy_susp_sof_workaround) {
  1671. /* Only SUSP SOF hardware bug exists, rest all not present */
  1672. mehci->ehci.susp_sof_bug = 1;
  1673. }
  1674. if (pdata->reset_delay)
  1675. mehci->ehci.reset_delay = pdata->reset_delay;
  1676. mehci->ehci.pool_64_bit_align = pdata->pool_64_bit_align;
  1677. mehci->enable_hbm = pdata->enable_hbm;
  1678. if (pdata) {
  1679. mehci->ehci.log2_irq_thresh = pdata->log2_irq_thresh;
  1680. mehci->ehci.disable_cerr = pdata->disable_cerr;
  1681. }
  1682. ret = msm_hsic_init_gdsc(mehci, 1);
  1683. if (ret) {
  1684. dev_err(&pdev->dev, "unable to initialize GDSC\n");
  1685. ret = -ENODEV;
  1686. goto put_hcd;
  1687. }
  1688. res = platform_get_resource_byname(pdev,
  1689. IORESOURCE_IRQ,
  1690. "peripheral_status_irq");
  1691. if (res)
  1692. mehci->peripheral_status_irq = res->start;
  1693. res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "wakeup");
  1694. if (res) {
  1695. mehci->wakeup_irq = res->start;
  1696. dev_dbg(mehci->dev, "wakeup_irq: %d\n", mehci->wakeup_irq);
  1697. }
  1698. ret = msm_hsic_init_clocks(mehci, 1);
  1699. if (ret) {
  1700. dev_err(&pdev->dev, "unable to initialize clocks\n");
  1701. ret = -ENODEV;
  1702. goto unmap;
  1703. }
  1704. ret = msm_hsic_init_vddcx(mehci, 1);
  1705. if (ret) {
  1706. dev_err(&pdev->dev, "unable to initialize VDDCX\n");
  1707. ret = -ENODEV;
  1708. goto deinit_clocks;
  1709. }
  1710. init_completion(&mehci->rt_completion);
  1711. init_completion(&mehci->gpt0_completion);
  1712. msm_hsic_phy_reset(mehci);
  1713. ehci_wq = create_singlethread_workqueue("ehci_wq");
  1714. if (!ehci_wq) {
  1715. dev_err(&pdev->dev, "unable to create workqueue\n");
  1716. ret = -ENOMEM;
  1717. goto deinit_vddcx;
  1718. }
  1719. INIT_WORK(&mehci->bus_vote_w, ehci_hsic_bus_vote_w);
  1720. ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
  1721. if (ret) {
  1722. dev_err(&pdev->dev, "unable to register HCD\n");
  1723. goto destroy_wq;
  1724. }
  1725. ret = msm_hsic_start(mehci);
  1726. if (ret) {
  1727. dev_err(&pdev->dev, "unable to initialize PHY\n");
  1728. goto destroy_wq;
  1729. }
  1730. device_init_wakeup(&pdev->dev, 1);
  1731. wake_lock_init(&mehci->wlock, WAKE_LOCK_SUSPEND, dev_name(&pdev->dev));
  1732. wake_lock(&mehci->wlock);
  1733. if (mehci->peripheral_status_irq) {
  1734. ret = request_threaded_irq(mehci->peripheral_status_irq,
  1735. NULL, hsic_peripheral_status_change,
  1736. IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
  1737. | IRQF_SHARED,
  1738. "hsic_peripheral_status", mehci);
  1739. if (ret)
  1740. dev_err(&pdev->dev, "%s:request_irq:%d failed:%d",
  1741. __func__, mehci->peripheral_status_irq, ret);
  1742. }
  1743. /* configure wakeup irq */
  1744. if (mehci->wakeup_irq) {
  1745. /* In case if wakeup gpio is pulled high at this point
  1746. * remote wakeup interrupt fires right after request_irq.
  1747. * Remote wake up interrupt only needs to be enabled when
  1748. * HSIC bus goes to suspend.
  1749. */
  1750. irq_set_status_flags(mehci->wakeup_irq, IRQ_NOAUTOEN);
  1751. ret = request_irq(mehci->wakeup_irq, msm_hsic_wakeup_irq,
  1752. wakeup_irq_flags,
  1753. "msm_hsic_wakeup", mehci);
  1754. if (ret) {
  1755. dev_err(&pdev->dev, "request_irq(%d) failed: %d\n",
  1756. mehci->wakeup_irq, ret);
  1757. mehci->wakeup_irq = 0;
  1758. }
  1759. }
  1760. mehci->async_irq = platform_get_irq_byname(pdev, "async_irq");
  1761. if (mehci->async_irq < 0) {
  1762. dev_dbg(&pdev->dev, "platform_get_irq for async_int failed\n");
  1763. mehci->async_irq = 0;
  1764. } else {
  1765. ret = request_irq(mehci->async_irq, msm_hsic_wakeup_irq,
  1766. IRQF_TRIGGER_RISING, "msm_hsic_async", mehci);
  1767. if (ret) {
  1768. dev_err(&pdev->dev, "request irq failed (ASYNC INT)\n");
  1769. mehci->async_irq = 0;
  1770. } else if (!mehci->wakeup_irq) {
  1771. /* Async IRQ is used only in absence of dedicated irq */
  1772. enable_irq_wake(mehci->async_irq);
  1773. }
  1774. }
  1775. ret = ehci_hsic_msm_debugfs_init(mehci);
  1776. if (ret)
  1777. dev_dbg(&pdev->dev, "mode debugfs file is"
  1778. "not available\n");
  1779. if (pdata && pdata->bus_scale_table) {
  1780. mehci->bus_perf_client =
  1781. msm_bus_scale_register_client(pdata->bus_scale_table);
  1782. /* Configure BUS performance parameters for MAX bandwidth */
  1783. if (mehci->bus_perf_client) {
  1784. mehci->bus_vote = true;
  1785. queue_work(ehci_wq, &mehci->bus_vote_w);
  1786. } else {
  1787. dev_err(&pdev->dev, "%s: Failed to register BUS "
  1788. "scaling client!!\n", __func__);
  1789. }
  1790. }
  1791. __mehci = mehci;
  1792. if (pdata && pdata->standalone_latency)
  1793. pm_qos_add_request(&mehci->pm_qos_req_dma,
  1794. PM_QOS_CPU_DMA_LATENCY, pdata->standalone_latency + 1);
  1795. /*
  1796. * This pdev->dev is assigned parent of root-hub by USB core,
  1797. * hence, runtime framework automatically calls this driver's
  1798. * runtime APIs based on root-hub's state.
  1799. */
  1800. pm_runtime_set_active(&pdev->dev);
  1801. pm_runtime_enable(&pdev->dev);
  1802. /* Decrement the parent device's counter after probe.
  1803. * As child is active, parent will not be put into
  1804. * suspend mode.
  1805. */
  1806. if (pdev->dev.parent)
  1807. pm_runtime_put_sync(pdev->dev.parent);
  1808. if (mehci->enable_hbm)
  1809. hbm_init(hcd, pdata->disable_park_mode);
  1810. if (pdata && pdata->consider_ipa_handshake)
  1811. msm_bam_set_hsic_host_dev(&pdev->dev);
  1812. return 0;
  1813. destroy_wq:
  1814. destroy_workqueue(ehci_wq);
  1815. deinit_vddcx:
  1816. msm_hsic_init_vddcx(mehci, 0);
  1817. msm_hsic_init_gdsc(mehci, 0);
  1818. deinit_clocks:
  1819. msm_hsic_init_clocks(mehci, 0);
  1820. unmap:
  1821. iounmap(hcd->regs);
  1822. put_hcd:
  1823. usb_put_hcd(hcd);
  1824. return ret;
  1825. }
/*
 * ehci_hsic_msm_remove() - tear down the HSIC host controller.
 *
 * The teardown order deliberately mirrors probe in reverse: runtime PM is
 * disabled first, the HCD is removed before any of its resources are
 * released, IRQs are freed, then bus voting, debugfs, workqueue, GPIOs,
 * regulators, clocks and the register mapping.
 */
static int __devexit ehci_hsic_msm_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
	struct msm_hsic_host_platform_data *pdata = mehci->dev->platform_data;

	if (pdata && pdata->consider_ipa_handshake)
		msm_bam_set_hsic_host_dev(NULL);

	/* If the device was removed no need to call pm_runtime_disable */
	if (pdev->dev.power.power_state.event != PM_EVENT_INVALID)
		pm_runtime_disable(&pdev->dev);

	pm_runtime_set_suspended(&pdev->dev);

	if (mehci->enable_hbm)
		hbm_uninit();

	/* Remove the HCD prior to releasing our resources. */
	usb_remove_hcd(hcd);

	if (pdata && pdata->standalone_latency)
		pm_qos_remove_request(&mehci->pm_qos_req_dma);

	if (mehci->peripheral_status_irq)
		free_irq(mehci->peripheral_status_irq, mehci);

	if (mehci->wakeup_irq) {
		/* only disable wake if suspend actually enabled it */
		if (mehci->wakeup_irq_enabled)
			disable_irq_wake(mehci->wakeup_irq);
		free_irq(mehci->wakeup_irq, mehci);
	}

	if (mehci->async_irq) {
		/* Async IRQ is used only in absence of dedicated wakeup irq */
		if (!mehci->wakeup_irq)
			disable_irq_wake(mehci->async_irq);
		free_irq(mehci->async_irq, mehci);
	}

	/*
	 * If the update request is called after unregister, the request will
	 * fail. Results are undefined if unregister is called in the middle of
	 * update request.
	 */
	mehci->bus_vote = false;
	cancel_work_sync(&mehci->bus_vote_w);

	if (mehci->bus_perf_client)
		msm_bus_scale_unregister_client(mehci->bus_perf_client);

	ehci_hsic_msm_debugfs_cleanup();
	device_init_wakeup(&pdev->dev, 0);
	destroy_workqueue(ehci_wq);

	msm_hsic_config_gpios(mehci, 0);

	if (pdata && pdata->resume_gpio)
		gpio_free(pdata->resume_gpio);

	msm_hsic_init_vddcx(mehci, 0);
	msm_hsic_init_gdsc(mehci, 0);

	msm_hsic_init_clocks(mehci, 0);
	wake_lock_destroy(&mehci->wlock);
	iounmap(hcd->regs);
	usb_put_hcd(hcd);

	/* DT pdata was devm-allocated; drop the stale pointer */
	if (pdev->dev.of_node)
		pdev->dev.platform_data = NULL;

	return 0;
}
  1881. #ifdef CONFIG_PM_SLEEP
/*
 * System-sleep suspend callback.
 *
 * Refuses to suspend (-EBUSY) unless the controller has already entered
 * low-power mode; arms hcd->irq as a wakeup source only when no dedicated
 * async IRQ exists.
 */
static int msm_hsic_pm_suspend(struct device *dev)
{
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);

	dev_dbg(dev, "ehci-msm-hsic PM suspend\n");

	dbg_log_event(NULL, "PM Suspend", 0);

	/* system sleep is only allowed once runtime PM has reached LPM */
	if (!atomic_read(&mehci->in_lpm)) {
		dev_info(dev, "abort suspend\n");
		dbg_log_event(NULL, "PM Suspend abort", 0);
		return -EBUSY;
	}

	if (device_may_wakeup(dev) && !mehci->async_irq)
		enable_irq_wake(hcd->irq);

	return 0;
}
  1897. static int msm_hsic_pm_suspend_noirq(struct device *dev)
  1898. {
  1899. struct usb_hcd *hcd = dev_get_drvdata(dev);
  1900. struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
  1901. if (atomic_read(&mehci->async_int)) {
  1902. dev_dbg(dev, "suspend_noirq: Aborting due to pending interrupt\n");
  1903. return -EBUSY;
  1904. }
  1905. return 0;
  1906. }
/*
 * System-sleep resume callback.
 *
 * Disarms the wakeup IRQ, then either leaves the controller in LPM (when
 * nothing needs it yet) or resumes it and re-arms runtime PM so the device
 * is accounted as fully active.
 */
static int msm_hsic_pm_resume(struct device *dev)
{
	int ret;
	struct usb_hcd *hcd = dev_get_drvdata(dev);
	struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);

	dev_dbg(dev, "ehci-msm-hsic PM resume\n");
	dbg_log_event(NULL, "PM Resume", 0);

	if (device_may_wakeup(dev) && !mehci->async_irq)
		disable_irq_wake(hcd->irq);

	/*
	 * Keep HSIC in Low Power Mode if system is resumed
	 * by any other wakeup source. HSIC is resumed later
	 * when remote wakeup is received or interface driver
	 * start I/O.
	 */
	if (!atomic_read(&mehci->pm_usage_cnt) &&
			!atomic_read(&mehci->async_int) &&
			pm_runtime_suspended(dev))
		return 0;

	ret = msm_hsic_resume(mehci);
	if (ret)
		return ret;

	/* Bring the device to full powered state upon system resume */
	pm_runtime_disable(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	return 0;
}
  1935. #endif
  1936. #ifdef CONFIG_PM_RUNTIME
/* Runtime-idle callback: trace only; returning 0 lets suspend proceed. */
static int msm_hsic_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "EHCI runtime idle\n");
	return 0;
}
  1942. static int msm_hsic_runtime_suspend(struct device *dev)
  1943. {
  1944. struct usb_hcd *hcd = dev_get_drvdata(dev);
  1945. struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
  1946. dev_dbg(dev, "EHCI runtime suspend\n");
  1947. dbg_log_event(NULL, "Run Time PM Suspend", 0);
  1948. return msm_hsic_suspend(mehci);
  1949. }
  1950. static int msm_hsic_runtime_resume(struct device *dev)
  1951. {
  1952. struct usb_hcd *hcd = dev_get_drvdata(dev);
  1953. struct msm_hsic_hcd *mehci = hcd_to_hsic(hcd);
  1954. dev_dbg(dev, "EHCI runtime resume\n");
  1955. dbg_log_event(NULL, "Run Time PM Resume", 0);
  1956. return msm_hsic_resume(mehci);
  1957. }
  1958. #endif
  1959. #ifdef CONFIG_PM
/* PM ops: system sleep + runtime PM callbacks defined above.  suspend_noirq
 * is assigned directly because SET_SYSTEM_SLEEP_PM_OPS has no slot for it. */
static const struct dev_pm_ops msm_hsic_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_hsic_pm_suspend, msm_hsic_pm_resume)
	.suspend_noirq = msm_hsic_pm_suspend_noirq,
	SET_RUNTIME_PM_OPS(msm_hsic_runtime_suspend, msm_hsic_runtime_resume,
				msm_hsic_runtime_idle)
};
  1966. #endif
/* Device-tree match table: binds this driver to "qcom,hsic-host" nodes. */
static const struct of_device_id hsic_host_dt_match[] = {
	{ .compatible = "qcom,hsic-host",
	},
	{}
};
/* Platform driver glue; PM ops are compiled in only under CONFIG_PM. */
static struct platform_driver ehci_msm_hsic_driver = {
	.probe = ehci_hsic_msm_probe,
	.remove = __devexit_p(ehci_hsic_msm_remove),
	.driver = {
		.name = "msm_hsic_host",
#ifdef CONFIG_PM
		.pm = &msm_hsic_dev_pm_ops,
#endif
		.of_match_table = hsic_host_dt_match,
	},
};