ice40-hcd.c 49 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141
  1. /*
  2. * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2001-2004 by David Brownell
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 and
  7. * only version 2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. */
  15. /*
  16. * Root HUB management and Asynchronous scheduling traversal
  17. * Based on ehci-hub.c and ehci-q.c
  18. */
  19. #define pr_fmt(fmt) "%s: " fmt, __func__
  20. #include <linux/kernel.h>
  21. #include <linux/module.h>
  22. #include <linux/err.h>
  23. #include <linux/ktime.h>
  24. #include <linux/uaccess.h>
  25. #include <linux/debugfs.h>
  26. #include <linux/pm_runtime.h>
  27. #include <linux/regulator/consumer.h>
  28. #include <linux/gpio.h>
  29. #include <linux/of_gpio.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/firmware.h>
  32. #include <linux/spi/spi.h>
  33. #include <linux/usb.h>
  34. #include <linux/usb/hcd.h>
  35. #include <linux/usb/ch9.h>
  36. #include <linux/usb/ch11.h>
  37. #include <asm/unaligned.h>
  38. #include <mach/gpiomux.h>
  39. #define CREATE_TRACE_POINTS
  40. #include <trace/events/ice40.h>
  41. #define FADDR_REG 0x00 /* R/W: Device address */
  42. #define HCMD_REG 0x01 /* R/W: Host transfer command */
  43. #define XFRST_REG 0x02 /* R: Transfer status */
  44. #define IRQ_REG 0x03 /* R/C: IRQ status */
  45. #define IEN_REG 0x04 /* R/W: IRQ enable */
  46. #define CTRL0_REG 0x05 /* R/W: Host control command */
  47. #define CTRL1_REG 0x06 /* R/W: Host control command */
  48. #define WBUF0_REG 0x10 /* W: Tx fifo 0 */
  49. #define WBUF1_REG 0x11 /* W: Tx fifo 1 */
  50. #define SUBUF_REG 0x12 /* W: SETUP fifo */
  51. #define WBLEN_REG 0x13 /* W: Tx fifo size */
  52. #define RBUF0_REG 0x18 /* R: Rx fifo 0 */
  53. #define RBUF1_REG 0x19 /* R: Rx fifo 1 */
  54. #define RBLEN_REG 0x1B /* R: Rx fifo size */
  55. #define WRITE_CMD(addr) ((addr << 3) | 1)
  56. #define READ_CMD(addr) ((addr << 3) | 0)
  57. /* Host controller command register definitions */
  58. #define HCMD_EP(ep) (ep & 0xF)
  59. #define HCMD_BSEL(sel) (sel << 4)
  60. #define HCMD_TOGV(toggle) (toggle << 5)
  61. #define HCMD_PT(token) (token << 6)
  62. /* Transfer status register definitions */
  63. #define XFR_MASK(xfr) (xfr & 0xF)
  64. #define XFR_SUCCESS 0x0
  65. #define XFR_BUSY 0x1
  66. #define XFR_PKTERR 0x2
  67. #define XFR_PIDERR 0x3
  68. #define XFR_NAK 0x4
  69. #define XFR_STALL 0x5
  70. #define XFR_WRONGPID 0x6
  71. #define XFR_CRCERR 0x7
  72. #define XFR_TOGERR 0x8
  73. #define XFR_BADLEN 0x9
  74. #define XFR_TIMEOUT 0xA
  75. #define LINE_STATE(xfr) ((xfr & 0x30) >> 4) /* D+, D- */
  76. #define DPST BIT(5)
  77. #define DMST BIT(4)
  78. #define PLLOK BIT(6)
  79. #define R64B BIT(7)
  80. /* Interrupt enable/status register definitions */
  81. #define RESET_IRQ BIT(0)
  82. #define RESUME_IRQ BIT(1)
  83. #define SUSP_IRQ BIT(3)
  84. #define DISCONNECT_IRQ BIT(4)
  85. #define CONNECT_IRQ BIT(5)
  86. #define FRAME_IRQ BIT(6)
  87. #define XFR_IRQ BIT(7)
  88. /* Control 0 register definitions */
  89. #define RESET_CTRL BIT(0)
  90. #define FRAME_RESET_CTRL BIT(1)
  91. #define DET_BUS_CTRL BIT(2)
  92. #define RESUME_CTRL BIT(3)
  93. #define SOFEN_CTRL BIT(4)
  94. #define DM_PD_CTRL BIT(6)
  95. #define DP_PD_CTRL BIT(7)
  96. #define HRST_CTRL BIT(5)
  97. /* Control 1 register definitions */
  98. #define INT_EN_CTRL BIT(0)
/*
 * Types of SPI transactions exchanged with the bridge chip.
 * Not referenced in this chunk; presumably used when building the
 * pre-allocated spi_message structs elsewhere in the file.
 */
enum ice40_xfr_type {
	FIRMWARE_XFR,	/* firmware blob download (see fw_name) */
	REG_WRITE_XFR,	/* single register write (wmsg) */
	REG_READ_XFR,	/* single register read (rmsg) */
	SETUP_XFR,	/* SETUP packet into SUBUF fifo */
	DATA_IN_XFR,	/* IN data read from RBUF fifo */
	DATA_OUT_XFR,	/* OUT data write into WBUF fifo */
};
/*
 * Control-transfer state machine for endpoint 0 (ihcd->ep0_state).
 * SETUP_PHASE starts at 1, presumably leaving 0 as an implicit
 * "unset" value for a zero-initialized ice40_hcd.
 */
enum ice40_ep_phase {
	SETUP_PHASE = 1,
	DATA_PHASE,
	STATUS_PHASE,
};
/* Per-endpoint state, stored in usb_host_endpoint->hcpriv. */
struct ice40_ep {
	u8 xcat_err;		/* consecutive transaction-error count;
				 * 8 in a row retires the URB with -EPROTO */
	bool unlinking;		/* unlink requested; flushed in async work */
	bool halted;		/* URB retired with -EPIPE/-EPROTO; endpoint
				 * is dropped from the async list */
	struct usb_host_endpoint *ep;	/* back pointer to core endpoint */
	struct list_head ep_list;	/* link on ice40_hcd->async_list */
};
/* Driver-wide state for the iCE40 SPI-attached USB host controller. */
struct ice40_hcd {
	spinlock_t lock;	/* protects async_list and URB state */
	struct mutex wlock;	/* serializes register writes (single wmsg) */
	struct mutex rlock;	/* serializes register reads (single rmsg) */

	u8 devnum;		/* device address cached in FADDR_REG */
	u32 port_flags;		/* root-hub port status bits */
	u8 ctrl0;		/* cached CTRL0_REG value */
	u8 wblen0;		/* cached WBLEN_REG value; ~0 = invalid */
	enum ice40_ep_phase ep0_state;	/* control transfer stage for ep0 */

	struct usb_hcd *hcd;
	struct list_head async_list;	/* endpoints with pending URBs */
	struct workqueue_struct *wq;
	struct work_struct async_work;	/* runs ice40_async_work */

	/* board wiring -- usage not visible in this chunk */
	int reset_gpio;
	int slave_select_gpio;
	int config_done_gpio;
	int vcc_en_gpio;
	int clk_en_gpio;
	struct regulator *core_vcc;
	struct regulator *spi_vcc;
	struct regulator *gpio_vcc;
	bool powered;
	struct dentry *dbg_root;	/* debugfs directory */
	bool pcd_pending;

	/* SPI stuff later */
	struct spi_device *spi;
	struct spi_message *fmsg;	/* firmware download message */
	struct spi_transfer *fmsg_xfr; /* size 1 */
	struct spi_message *wmsg;	/* register write message */
	struct spi_transfer *wmsg_xfr; /* size 1 */
	u8 *w_tx_buf;
	u8 *w_rx_buf;
	struct spi_message *rmsg;	/* register read message */
	struct spi_transfer *rmsg_xfr; /* size 1 */
	u8 *r_tx_buf;
	u8 *r_rx_buf;
	struct spi_message *setup_msg;	/* SETUP packet message */
	struct spi_transfer *setup_xfr; /* size 2 */
	u8 *setup_buf; /* size 1 for SUBUF */
	struct spi_message *in_msg;	/* IN data message */
	struct spi_transfer *in_xfr; /* size 2 */
	u8 *in_buf; /* size 2 for reading from RBUF0 */
	struct spi_message *out_msg;	/* OUT data message */
	struct spi_transfer *out_xfr; /* size 2 */
	u8 *out_buf; /* size 1 for writing WBUF0 */
};
/* Firmware blob file name, overridable via the "fw" module parameter. */
static char fw_name[16] = "ice40.bin";
module_param_string(fw, fw_name, sizeof(fw_name), S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fw, "firmware blob file name");

/* When true, ice40_reset() skips bus detection (no device attached). */
static bool debugger;
module_param(debugger, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debugger, "true to use the debug port");
  171. static inline struct ice40_hcd *hcd_to_ihcd(struct usb_hcd *hcd)
  172. {
  173. return *((struct ice40_hcd **) hcd->hcd_priv);
  174. }
/*
 * Write @val into bridge register @addr over SPI.
 *
 * Register Write Pattern:
 * TX: 1st byte is CMD (register + write), 2nd byte is value
 * RX: Ignore
 *
 * The mutex protects concurrent register writes as we have only
 * one pre-allocated SPI message struct (wmsg). Failures are logged
 * but not propagated to the caller.
 */
static void ice40_spi_reg_write(struct ice40_hcd *ihcd, u8 val, u8 addr)
{
	int ret;

	mutex_lock(&ihcd->wlock);
	ihcd->w_tx_buf[0] = WRITE_CMD(addr);
	ihcd->w_tx_buf[1] = val;
	ret = spi_sync(ihcd->spi, ihcd->wmsg);
	if (ret < 0) /* should not happen */
		pr_err("failed. val = %d addr = %d\n", val, addr);
	trace_ice40_reg_write(addr, val, ihcd->w_tx_buf[0],
			ihcd->w_tx_buf[1], ret);
	mutex_unlock(&ihcd->wlock);
}
/*
 * Read bridge register @addr over SPI.
 *
 * Register Read Pattern:
 * TX: 1st byte is CMD (register + read)
 * RX: 1st, 2nd byte Ignore, 3rd byte value.
 *
 * The mutex protects concurrent register reads as we have only
 * one pre-allocated SPI message struct (rmsg).
 *
 * Returns the register value (0-255) on success, or the negative
 * spi_sync() error code if the SPI transfer itself failed.
 */
static int ice40_spi_reg_read(struct ice40_hcd *ihcd, u8 addr)
{
	int ret;

	mutex_lock(&ihcd->rlock);
	ihcd->r_tx_buf[0] = READ_CMD(addr);
	ret = spi_sync(ihcd->spi, ihcd->rmsg);
	if (ret < 0)
		pr_err("failed. addr = %d\n", addr);
	else
		ret = ihcd->r_rx_buf[2];
	trace_ice40_reg_read(addr, ihcd->r_tx_buf[0], ret);
	mutex_unlock(&ihcd->rlock);
	return ret;
}
/*
 * Poll the transfer status register until the transaction leaves
 * the BUSY state or @usecs have elapsed.
 *
 * Returns the raw XFRST_REG value on completion. On timeout it
 * returns 0xFF, whose low nibble (0xF) is not a defined status
 * code, so callers' switch default branch treats it as a timeout.
 */
static int ice40_poll_xfer(struct ice40_hcd *ihcd, int usecs)
{
	ktime_t start = ktime_get();
	u8 val, retry = 0;
	u8 ret = ~0; /* time out */

again:
	/*
	 * The SPI transaction may take tens of usec. Use ktime
	 * based checks rather than loop count.
	 */
	do {
		val = ice40_spi_reg_read(ihcd, XFRST_REG);
		if (XFR_MASK(val) != XFR_BUSY)
			return val;
	} while (ktime_us_delta(ktime_get(), start) < usecs);

	/*
	 * The SPI transaction involves a context switch. For any
	 * reason, if we are scheduled out more than usecs after
	 * the 1st read, this extra read will help.
	 */
	if (!retry) {
		retry = 1;
		goto again;
	}

	return ret;
}
/*
 * Poll register @reg until (value & @mask) == @done, or @usecs
 * have elapsed.
 *
 * Returns 0 on success and -ETIMEDOUT otherwise. A single retry
 * after the deadline guards against the thread being scheduled out
 * between the first read and the deadline check.
 */
static int
ice40_handshake(struct ice40_hcd *ihcd, u8 reg, u8 mask, u8 done, int usecs)
{
	ktime_t start = ktime_get();
	u8 val, retry = 0;

again:
	do {
		val = ice40_spi_reg_read(ihcd, reg);
		val &= mask;
		if (val == done)
			return 0;
	} while (ktime_us_delta(ktime_get(), start) < usecs);

	if (!retry) {
		retry = 1;
		goto again;
	}

	return -ETIMEDOUT;
}
/* Driver name string. */
static const char hcd_name[] = "ice40-hcd";
/*
 * HCD reset callback: program the default device address and verify
 * that a Full-speed device is present on the bus.
 *
 * Returns 0 on success, -ENODEV when no device is connected, or the
 * bus-detection handshake error.
 */
static int ice40_reset(struct usb_hcd *hcd)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
	u8 ctrl, status;
	int ret = 0;

	/*
	 * Program the default address 0. The device address is
	 * re-programmed after SET_ADDRESS in URB handling path.
	 */
	ihcd->devnum = 0;
	ice40_spi_reg_write(ihcd, 0, FADDR_REG);

	/* invalidate the cached WBLEN so the first OUT programs it */
	ihcd->wblen0 = ~0;

	/*
	 * Read the line state. This driver is loaded after the
	 * UICC card insertion. So the line state should indicate
	 * that a Full-speed device is connected. Return error
	 * if there is no device connected.
	 *
	 * There can be no device connected during debug. A debugfs
	 * file is provided to sample the bus line and update the
	 * port flags accordingly.
	 */
	if (debugger)
		goto out;

	ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
	ice40_spi_reg_write(ihcd, ctrl | DET_BUS_CTRL, CTRL0_REG);
	/* the controller clears DET_BUS_CTRL when sampling is done */
	ret = ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
	if (ret) {
		pr_err("bus detection failed\n");
		goto out;
	}

	status = ice40_spi_reg_read(ihcd, XFRST_REG);
	pr_debug("line state (D+, D-) is %d\n", LINE_STATE(status));
	if (status & DPST) {
		/* D+ high: Full-speed idle */
		pr_debug("Full speed device connected\n");
		ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
	} else {
		pr_err("No device connected\n");
		ret = -ENODEV;
	}
out:
	return ret;
}
/*
 * HCD start callback: enable SOF generation (SOFEN_CTRL) and switch
 * the core to event-driven root-hub status reporting.
 */
static int ice40_run(struct usb_hcd *hcd)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);

	/*
	 * HCD_FLAG_POLL_RH flag is not set by us. Core will not poll
	 * for the port status periodically. This uses_new_polling
	 * flag tells core that this hcd will call usb_hcd_poll_rh_status
	 * upon port change.
	 */
	hcd->uses_new_polling = 1;

	/*
	 * Cache the ctrl0 register to avoid multiple reads. This register
	 * is written during reset and resume.
	 */
	ihcd->ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
	ihcd->ctrl0 |= SOFEN_CTRL;
	ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);

	return 0;
}
  325. static void ice40_stop(struct usb_hcd *hcd)
  326. {
  327. struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
  328. cancel_work_sync(&ihcd->async_work);
  329. }
  330. /*
  331. * The _Error looks odd. But very helpful when looking for
  332. * any errors in logs.
  333. */
  334. static char __maybe_unused *xfr_status_string(int status)
  335. {
  336. switch (XFR_MASK(status)) {
  337. case XFR_SUCCESS:
  338. return "Ack";
  339. case XFR_BUSY:
  340. return "Busy_Error";
  341. case XFR_PKTERR:
  342. return "Pkt_Error";
  343. case XFR_PIDERR:
  344. return "PID_Error";
  345. case XFR_NAK:
  346. return "Nak";
  347. case XFR_STALL:
  348. return "Stall_Error";
  349. case XFR_WRONGPID:
  350. return "WrongPID_Error";
  351. case XFR_CRCERR:
  352. return "CRC_Error";
  353. case XFR_TOGERR:
  354. return "Togg_Error";
  355. case XFR_BADLEN:
  356. return "BadLen_Error";
  357. case XFR_TIMEOUT:
  358. return "Timeout_Error";
  359. default:
  360. return "Unknown_Error";
  361. }
  362. }
/*
 * Send the 8-byte SETUP packet of a control transfer.
 *
 * SETUP transaction Handling:
 * - copy the setup buffer to SUBUF fifo
 * - Program HCMD register to initiate the SETUP transaction.
 * - poll for completion by reading XFRST register.
 * - Interpret the result.
 *
 * Returns 0 on Ack, -EINPROGRESS to retry after a transient bus
 * error, or a negative error code to retire the URB.
 */
static int ice40_xfer_setup(struct ice40_hcd *ihcd, struct urb *urb)
{
	struct usb_host_endpoint *ep = urb->ep;
	struct ice40_ep *iep = ep->hcpriv;
	void *buf = urb->setup_packet;
	int ret, status;
	u8 cmd;

	ihcd->setup_buf[0] = WRITE_CMD(SUBUF_REG);
	ihcd->setup_xfr[1].tx_buf = buf;
	ihcd->setup_xfr[1].len = sizeof(struct usb_ctrlrequest);
	ret = spi_sync(ihcd->spi, ihcd->setup_msg);
	if (ret < 0) {
		pr_err("SPI transfer failed\n");
		status = ret = -EIO;
		goto out;
	}

	/* SETUP token (PT(2)), always DATA0, always endpoint 0 */
	cmd = HCMD_PT(2) | HCMD_TOGV(0) | HCMD_BSEL(0) | HCMD_EP(0);
	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
	status = ice40_poll_xfer(ihcd, 1000);
	switch (XFR_MASK(status)) {
	case XFR_SUCCESS:
		iep->xcat_err = 0;
		ret = 0;
		break;
	case XFR_NAK: /* Device should not return Nak for SETUP */
	case XFR_STALL:
		iep->xcat_err = 0;
		ret = -EPIPE;
		break;
	case XFR_PKTERR:
	case XFR_PIDERR:
	case XFR_WRONGPID:
	case XFR_CRCERR:
	case XFR_TIMEOUT:
		/* retry transient errors, up to 8 in a row */
		if (++iep->xcat_err < 8)
			ret = -EINPROGRESS;
		else
			ret = -EPROTO;
		break;
	default:
		pr_err("transaction timed out\n");
		ret = -EIO;
	}
out:
	trace_ice40_setup(xfr_status_string(status), ret);
	return ret;
}
/*
 * Perform one IN (or control STATUS-in) transaction for @urb.
 *
 * IN transaction Handling:
 * - Program HCMD register to initiate the IN transaction.
 * - poll for completion by reading XFRST register.
 * - Interpret the result.
 * - If ACK is received and we expect some data, read RBLEN
 * - Read the data from RBUF
 *
 * Returns 0 when the URB is complete, -EINPROGRESS when more packets
 * are expected (or the device NAKed), or a negative error to retire
 * the URB.
 */
static int ice40_xfer_in(struct ice40_hcd *ihcd, struct urb *urb)
{
	struct usb_host_endpoint *ep = urb->ep;
	struct usb_device *udev = urb->dev;
	u32 total_len = urb->transfer_buffer_length;
	u16 maxpacket = usb_endpoint_maxp(&ep->desc);
	u8 epnum = usb_pipeendpoint(urb->pipe);
	bool is_out = usb_pipeout(urb->pipe);
	struct ice40_ep *iep = ep->hcpriv;
	u8 cmd, status, len = 0, t, expected_len;
	void *buf;
	int ret;
	bool short_packet = true;

	if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
		expected_len = 0;
		buf = NULL;
		t = 1; /* STATUS PHASE is always DATA1 */
	} else {
		expected_len = min_t(u32, maxpacket,
				total_len - urb->actual_length);
		buf = urb->transfer_buffer + urb->actual_length;
		t = usb_gettoggle(udev, epnum, is_out);
	}

	cmd = HCMD_PT(0) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
	ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
	status = ice40_poll_xfer(ihcd, 1000);
	switch (XFR_MASK(status)) {
	case XFR_SUCCESS:
		usb_dotoggle(udev, epnum, is_out);
		iep->xcat_err = 0;
		ret = 0;
		/*
		 * R64B apparently flags a full 64-byte packet; in
		 * that case skip the RBLEN read and assume 64 below.
		 */
		if ((expected_len == 64) && (status & R64B))
			short_packet = false;
		break;
	case XFR_NAK:
		iep->xcat_err = 0;
		ret = -EINPROGRESS;
		break;
	case XFR_TOGERR:
		/*
		 * Peripheral had missed the previous Ack and sent
		 * the same packet again. Ack is sent by the hardware.
		 * As the data is received already, ignore this
		 * event.
		 */
		ret = -EINPROGRESS;
		break;
	case XFR_PKTERR:
	case XFR_PIDERR:
	case XFR_WRONGPID:
	case XFR_CRCERR:
	case XFR_TIMEOUT:
		/* retry transient errors, up to 8 in a row */
		if (++iep->xcat_err < 8)
			ret = -EINPROGRESS;
		else
			ret = -EPROTO;
		break;
	case XFR_STALL:
		ret = -EPIPE;
		break;
	case XFR_BADLEN:
		ret = -EOVERFLOW;
		break;
	default:
		pr_err("transaction timed out\n");
		ret = -EIO;
	}

	/*
	 * Proceed further only if Ack is received and
	 * we are expecting some data.
	 */
	if (ret || !expected_len)
		goto out;

	if (short_packet)
		len = ice40_spi_reg_read(ihcd, RBLEN_REG);
	else
		len = 64;

	/* babble condition */
	if (len > expected_len) {
		pr_err("overflow condition\n");
		ret = -EOVERFLOW;
		goto out;
	}

	/*
	 * zero len packet received. nothing to read from
	 * FIFO.
	 */
	if (len == 0) {
		ret = 0;
		goto out;
	}

	ihcd->in_buf[0] = READ_CMD(RBUF0_REG);
	ihcd->in_xfr[1].rx_buf = buf;
	ihcd->in_xfr[1].len = len;
	ret = spi_sync(ihcd->spi, ihcd->in_msg);
	if (ret < 0) {
		pr_err("SPI transfer failed\n");
		ret = -EIO;
		goto out;
	}

	urb->actual_length += len;
	/* a short packet terminates the transfer early */
	if ((urb->actual_length == total_len) ||
			(len < expected_len))
		ret = 0; /* URB completed */
	else
		ret = -EINPROGRESS; /* still pending */
out:
	trace_ice40_in(epnum, xfr_status_string(status), len,
			expected_len, ret);
	return ret;
}
  536. static int ice40_xfer_out(struct ice40_hcd *ihcd, struct urb *urb)
  537. {
  538. struct usb_host_endpoint *ep = urb->ep;
  539. struct usb_device *udev = urb->dev;
  540. u32 total_len = urb->transfer_buffer_length;
  541. u16 maxpacket = usb_endpoint_maxp(&ep->desc);
  542. u8 epnum = usb_pipeendpoint(urb->pipe);
  543. bool is_out = usb_pipeout(urb->pipe);
  544. struct ice40_ep *iep = ep->hcpriv;
  545. u8 cmd, status, len, t;
  546. void *buf;
  547. int ret;
  548. if (epnum == 0 && ihcd->ep0_state == STATUS_PHASE) {
  549. len = 0;
  550. buf = NULL;
  551. t = 1; /* STATUS PHASE is always DATA1 */
  552. } else {
  553. len = min_t(u32, maxpacket, total_len - urb->actual_length);
  554. buf = urb->transfer_buffer + urb->actual_length;
  555. t = usb_gettoggle(udev, epnum, is_out);
  556. }
  557. /*
  558. * OUT transaction Handling:
  559. * - If we need to send data, write the data to WBUF Fifo
  560. * - Program the WBLEN register
  561. * - Program HCMD register to initiate the OUT transaction.
  562. * - poll for completion by reading XFRST register.
  563. * - Interpret the result.
  564. */
  565. if (!len)
  566. goto no_data;
  567. ihcd->out_buf[0] = WRITE_CMD(WBUF0_REG);
  568. ihcd->out_xfr[1].tx_buf = buf;
  569. ihcd->out_xfr[1].len = len;
  570. ret = spi_sync(ihcd->spi, ihcd->out_msg);
  571. if (ret < 0) {
  572. pr_err("SPI transaction failed\n");
  573. status = ret = -EIO;
  574. goto out;
  575. }
  576. no_data:
  577. /*
  578. * Cache the WBLEN register and update it only if it
  579. * is changed from the previous value.
  580. */
  581. if (len != ihcd->wblen0) {
  582. ice40_spi_reg_write(ihcd, len, WBLEN_REG);
  583. ihcd->wblen0 = len;
  584. }
  585. cmd = HCMD_PT(1) | HCMD_TOGV(t) | HCMD_BSEL(0) | HCMD_EP(epnum);
  586. ice40_spi_reg_write(ihcd, cmd, HCMD_REG);
  587. status = ice40_poll_xfer(ihcd, 1000);
  588. switch (XFR_MASK(status)) {
  589. case XFR_SUCCESS:
  590. usb_dotoggle(udev, epnum, is_out);
  591. urb->actual_length += len;
  592. iep->xcat_err = 0;
  593. if (!len || (urb->actual_length == total_len))
  594. ret = 0; /* URB completed */
  595. else
  596. ret = -EINPROGRESS; /* pending */
  597. break;
  598. case XFR_NAK:
  599. iep->xcat_err = 0;
  600. ret = -EINPROGRESS;
  601. break;
  602. case XFR_PKTERR:
  603. case XFR_PIDERR:
  604. case XFR_WRONGPID:
  605. case XFR_CRCERR:
  606. case XFR_TIMEOUT:
  607. if (++iep->xcat_err < 8)
  608. ret = -EINPROGRESS;
  609. else
  610. ret = -EPROTO;
  611. break;
  612. case XFR_STALL:
  613. ret = -EPIPE;
  614. break;
  615. case XFR_BADLEN:
  616. ret = -EOVERFLOW;
  617. break;
  618. default:
  619. pr_err("transaction timed out\n");
  620. ret = -EIO;
  621. }
  622. out:
  623. trace_ice40_out(epnum, xfr_status_string(status), len, ret);
  624. return ret;
  625. }
  626. static int ice40_process_urb(struct ice40_hcd *ihcd, struct urb *urb)
  627. {
  628. struct usb_device *udev = urb->dev;
  629. u8 devnum = usb_pipedevice(urb->pipe);
  630. bool is_out = usb_pipeout(urb->pipe);
  631. u32 total_len = urb->transfer_buffer_length;
  632. int ret = 0;
  633. /*
  634. * The USB device address can be reset to 0 by core temporarily
  635. * during reset recovery process. Don't assume anything about
  636. * device address. The device address is programmed as 0 by
  637. * default. If the device address is different to the previous
  638. * cached value, re-program it here before proceeding. The device
  639. * address register (FADDR) holds the value across multiple
  640. * transactions and we support only one device.
  641. */
  642. if (ihcd->devnum != devnum) {
  643. ice40_spi_reg_write(ihcd, devnum, FADDR_REG);
  644. ihcd->devnum = devnum;
  645. }
  646. switch (usb_pipetype(urb->pipe)) {
  647. case PIPE_CONTROL:
  648. switch (ihcd->ep0_state) {
  649. case SETUP_PHASE:
  650. trace_ice40_ep0("SETUP");
  651. ret = ice40_xfer_setup(ihcd, urb);
  652. if (ret)
  653. break;
  654. if (total_len) {
  655. ihcd->ep0_state = DATA_PHASE;
  656. /*
  657. * Data stage always begin with
  658. * DATA1 PID.
  659. */
  660. usb_settoggle(udev, 0, is_out, 1);
  661. } else {
  662. ihcd->ep0_state = STATUS_PHASE;
  663. goto do_status;
  664. }
  665. /* fall through */
  666. case DATA_PHASE:
  667. trace_ice40_ep0("DATA");
  668. if (is_out)
  669. ret = ice40_xfer_out(ihcd, urb);
  670. else
  671. ret = ice40_xfer_in(ihcd, urb);
  672. if (ret)
  673. break;
  674. /* DATA Phase is completed successfully */
  675. ihcd->ep0_state = STATUS_PHASE;
  676. /* fall through */
  677. case STATUS_PHASE:
  678. do_status:
  679. trace_ice40_ep0("STATUS");
  680. /* zero len DATA transfers have IN status */
  681. if (!total_len || is_out)
  682. ret = ice40_xfer_in(ihcd, urb);
  683. else
  684. ret = ice40_xfer_out(ihcd, urb);
  685. if (ret)
  686. break;
  687. ihcd->ep0_state = SETUP_PHASE;
  688. break;
  689. default:
  690. pr_err("unknown stage for a control transfer\n");
  691. break;
  692. }
  693. break;
  694. case PIPE_BULK:
  695. if (is_out)
  696. ret = ice40_xfer_out(ihcd, urb);
  697. else
  698. ret = ice40_xfer_in(ihcd, urb);
  699. /*
  700. * We may have to support zero len packet terminations
  701. * for URB_ZERO_PACKET URBs.
  702. */
  703. break;
  704. default:
  705. pr_err("IN/ISO transfers not supported\n");
  706. break;
  707. }
  708. return ret;
  709. }
/*
 * Retire @urb and give it back to the USB core with @status.
 *
 * Must be called with spin lock and interrupts disabled. The lock
 * is dropped across usb_hcd_giveback_urb() because the completion
 * handler may call back into this HCD.
 */
static void ice40_complete_urb(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
	struct usb_host_endpoint *ep = urb->ep;
	struct ice40_ep *iep = ep->hcpriv;
	struct urb *first_urb;
	bool needs_update = false;
	bool control = usb_pipecontrol(urb->pipe);

	/*
	 * If the active URB i.e the first URB in the ep list is being
	 * removed, clear the transaction error count. If it is a control
	 * URB ep0_state needs to be reset to SETUP_PHASE.
	 */
	first_urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
	if (urb == first_urb)
		needs_update = true;

	usb_hcd_unlink_urb_from_ep(hcd, urb);

	spin_unlock(&ihcd->lock);
	trace_ice40_urb_done(urb, status);
	usb_hcd_giveback_urb(ihcd->hcd, urb, status);
	spin_lock(&ihcd->lock);

	if (needs_update) {
		iep->xcat_err = 0;
		if (control)
			ihcd->ep0_state = SETUP_PHASE;
	}
}
/*
 * Workqueue handler that drives all USB traffic.
 *
 * Traverse the active endpoints circularly and process URBs.
 * If any endpoint is marked for unlinking, the URBs are
 * completed here. The endpoint is removed from active list
 * if a URB is retired with -EPIPE/-EPROTO errors.
 */
static void ice40_async_work(struct work_struct *work)
{
	struct ice40_hcd *ihcd = container_of(work,
			struct ice40_hcd, async_work);
	struct usb_hcd *hcd = ihcd->hcd;
	struct list_head *tmp, *uent, *utmp;
	struct ice40_ep *iep;
	struct usb_host_endpoint *ep;
	struct urb *urb;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&ihcd->lock, flags);
	if (list_empty(&ihcd->async_list))
		goto out;

	iep = list_first_entry(&ihcd->async_list, struct ice40_ep, ep_list);
	while (1) {
		ep = iep->ep;
		urb = list_first_entry(&ep->urb_list, struct urb, urb_list);
		if (urb->unlinked) {
			status = urb->unlinked;
		} else {
			/* the SPI transactions sleep; drop the lock */
			spin_unlock_irqrestore(&ihcd->lock, flags);
			status = ice40_process_urb(ihcd, urb);
			spin_lock_irqsave(&ihcd->lock, flags);
		}
		/* Stall/protocol error parks the endpoint (halted) */
		if ((status == -EPIPE) || (status == -EPROTO))
			iep->halted = true;
		if (status != -EINPROGRESS)
			ice40_complete_urb(hcd, urb, status);
		/* flush any URBs unlinked while the lock was dropped */
		if (iep->unlinking) {
			list_for_each_safe(uent, utmp, &ep->urb_list) {
				urb = list_entry(uent, struct urb, urb_list);
				if (urb->unlinked)
					ice40_complete_urb(hcd, urb, 0);
			}
			iep->unlinking = false;
		}
		/* capture the successor before a possible list_del */
		tmp = iep->ep_list.next;
		if (list_empty(&ep->urb_list) || iep->halted) {
			list_del_init(&iep->ep_list);
			if (list_empty(&ihcd->async_list))
				break;
		}
		/* skip the list head to keep the traversal circular */
		if (tmp == &ihcd->async_list)
			tmp = tmp->next;
		iep = list_entry(tmp, struct ice40_ep, ep_list);
	}
out:
	spin_unlock_irqrestore(&ihcd->lock, flags);
}
  794. static int
  795. ice40_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
  796. {
  797. struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
  798. struct usb_device *udev = urb->dev;
  799. struct usb_host_endpoint *ep = urb->ep;
  800. bool is_out = usb_pipeout(urb->pipe);
  801. u8 epnum = usb_pipeendpoint(urb->pipe);
  802. struct ice40_ep *iep;
  803. unsigned long flags;
  804. int ret;
  805. /*
  806. * This bridge chip supports only Full-speed. So ISO is not
  807. * supported. Interrupt support is not implemented as there
  808. * is no use case.
  809. */
  810. if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
  811. pr_debug("iso and int xfers not supported\n");
  812. ret = -ENOTSUPP;
  813. goto out;
  814. }
  815. spin_lock_irqsave(&ihcd->lock, flags);
  816. ret = usb_hcd_link_urb_to_ep(hcd, urb);
  817. if (ret)
  818. goto rel_lock;
  819. trace_ice40_urb_enqueue(urb);
  820. iep = ep->hcpriv;
  821. if (!iep) {
  822. iep = kzalloc(sizeof(struct ice40_ep), GFP_ATOMIC);
  823. if (!iep) {
  824. pr_debug("fail to allocate iep\n");
  825. ret = -ENOMEM;
  826. goto unlink;
  827. }
  828. ep->hcpriv = iep;
  829. INIT_LIST_HEAD(&iep->ep_list);
  830. iep->ep = ep;
  831. usb_settoggle(udev, epnum, is_out, 0);
  832. if (usb_pipecontrol(urb->pipe))
  833. ihcd->ep0_state = SETUP_PHASE;
  834. }
  835. /*
  836. * We expect the interface driver to clear the stall condition
  837. * before queueing another URB. For example mass storage
  838. * device may STALL a bulk endpoint for un-supported command.
  839. * The storage driver clear the STALL condition before queueing
  840. * another URB.
  841. */
  842. iep->halted = false;
  843. if (list_empty(&iep->ep_list))
  844. list_add_tail(&iep->ep_list, &ihcd->async_list);
  845. queue_work(ihcd->wq, &ihcd->async_work);
  846. spin_unlock_irqrestore(&ihcd->lock, flags);
  847. return 0;
  848. unlink:
  849. usb_hcd_unlink_urb_from_ep(hcd, urb);
  850. rel_lock:
  851. spin_unlock_irqrestore(&ihcd->lock, flags);
  852. out:
  853. return ret;
  854. }
  855. static int
  856. ice40_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
  857. {
  858. struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
  859. struct usb_host_endpoint *ep = urb->ep;
  860. struct ice40_ep *iep;
  861. unsigned long flags;
  862. int ret;
  863. spin_lock_irqsave(&ihcd->lock, flags);
  864. ret = usb_hcd_check_unlink_urb(hcd, urb, status);
  865. if (ret)
  866. goto rel_lock;
  867. trace_ice40_urb_dequeue(urb);
  868. iep = ep->hcpriv;
  869. /*
  870. * If the endpoint is not in asynchronous schedule, complete
  871. * the URB immediately. Otherwise mark it as being unlinked.
  872. * The asynchronous schedule work will take care of completing
  873. * the URB when this endpoint is encountered during traversal.
  874. */
  875. if (list_empty(&iep->ep_list))
  876. ice40_complete_urb(hcd, urb, status);
  877. else
  878. iep->unlinking = true;
  879. rel_lock:
  880. spin_unlock_irqrestore(&ihcd->lock, flags);
  881. return ret;
  882. }
  883. static void
  884. ice40_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
  885. {
  886. struct ice40_ep *iep = ep->hcpriv;
  887. /*
  888. * If there is no I/O on this endpoint before, ep->hcpriv
  889. * will be NULL. nothing to do in this case.
  890. */
  891. if (!iep)
  892. return;
  893. if (!list_empty(&ep->urb_list))
  894. pr_err("trying to disable an non-empty endpoint\n");
  895. kfree(iep);
  896. ep->hcpriv = NULL;
  897. }
  898. static int ice40_hub_status_data(struct usb_hcd *hcd, char *buf)
  899. {
  900. struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
  901. int ret = 0;
  902. /*
  903. * core calls hub_status_method during suspend/resume.
  904. * return 0 if there is no port change. pcd_pending
  905. * is set to true when a device is connected and line
  906. * state is sampled via debugfs command. clear this
  907. * flag after returning the port change status.
  908. */
  909. if (ihcd->pcd_pending) {
  910. *buf = (1 << 1);
  911. ret = 1;
  912. ihcd->pcd_pending = false;
  913. }
  914. return ret;
  915. }
  916. static void ice40_hub_descriptor(struct usb_hub_descriptor *desc)
  917. {
  918. /* There is nothing special about us!! */
  919. desc->bDescLength = 9;
  920. desc->bDescriptorType = 0x29;
  921. desc->bNbrPorts = 1;
  922. desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_NO_LPSM |
  923. HUB_CHAR_NO_OCPM);
  924. desc->bPwrOn2PwrGood = 0;
  925. desc->bHubContrCurrent = 0;
  926. desc->u.hs.DeviceRemovable[0] = 0;
  927. desc->u.hs.DeviceRemovable[1] = ~0;
  928. }
/*
 * Root-hub control requests. Port state is tracked entirely in software
 * (ihcd->port_flags); only reset/resume touch the bridge registers.
 * Returns 0 on success, -EPIPE ("protocol stall") on any bad request.
 */
static int
ice40_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
		u16 wIndex, char *buf, u16 wLength)
{
	int ret = 0;
	u8 ctrl;
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);

	/*
	 * We have only 1 port. No special locking is required while
	 * handling root hub commands. The bridge chip does not maintain
	 * any port states. Maintain different port states in software.
	 */
	switch (typeReq) {
	case ClearPortFeature:
		/* Single port (wIndex 1), no data stage allowed */
		if (wIndex != 1 || wLength != 0)
			goto error;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			/*
			 * The device is resumed as part of the root hub
			 * resume to simplify the resume sequence. so
			 * we may simply return from here. If device is
			 * resumed before root hub is suspended, this
			 * flags will be cleared here.
			 */
			if (!(ihcd->port_flags & USB_PORT_STAT_SUSPEND))
				break;
			ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
			break;
		case USB_PORT_FEAT_ENABLE:
			ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
			break;
		case USB_PORT_FEAT_POWER:
			ihcd->port_flags &= ~USB_PORT_STAT_POWER;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			/* Change bits live in the upper 16 bits of port_flags */
			ihcd->port_flags &= ~(USB_PORT_STAT_C_CONNECTION << 16);
			break;
		case USB_PORT_FEAT_C_ENABLE:
		case USB_PORT_FEAT_C_SUSPEND:
		case USB_PORT_FEAT_C_OVER_CURRENT:
		case USB_PORT_FEAT_C_RESET:
			/* nothing special here */
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		ice40_hub_descriptor((struct usb_hub_descriptor *) buf);
		break;
	case GetHubStatus:
		/* No hub-level status/change bits to report */
		put_unaligned_le32(0, buf);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			goto error;
		/*
		 * Core resets the device and requests port status to
		 * stop the reset signaling. If there is a reset in
		 * progress, finish it here.
		 */
		ctrl = ice40_spi_reg_read(ihcd, CTRL0_REG);
		if (!(ctrl & RESET_CTRL))
			ihcd->port_flags &= ~USB_PORT_STAT_RESET;
		put_unaligned_le32(ihcd->port_flags, buf);
		break;
	case SetPortFeature:
		if (wIndex != 1 || wLength != 0)
			goto error;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			/* Can't suspend a port that is resetting/disabled */
			if (ihcd->port_flags & USB_PORT_STAT_RESET)
				goto error;
			if (!(ihcd->port_flags & USB_PORT_STAT_ENABLE))
				goto error;
			/* SOFs will be stopped during root hub suspend */
			ihcd->port_flags |= USB_PORT_STAT_SUSPEND;
			break;
		case USB_PORT_FEAT_POWER:
			ihcd->port_flags |= USB_PORT_STAT_POWER;
			break;
		case USB_PORT_FEAT_RESET:
			/* Good time to enable the port */
			ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
					RESET_CTRL, CTRL0_REG);
			ihcd->port_flags |= USB_PORT_STAT_RESET;
			ihcd->port_flags |= USB_PORT_STAT_ENABLE;
			break;
		default:
			goto error;
		}
		break;
	default:
error:
		/* "protocol stall" on error */
		ret = -EPIPE;
	}

	trace_ice40_hub_control(typeReq, wValue, wIndex, wLength, ret);
	return ret;
}
  1030. static void ice40_spi_power_off(struct ice40_hcd *ihcd);
/*
 * Root-hub suspend: stop SOFs so the attached device enters suspend,
 * then power-collapse the bridge chip. Returns 0 on success or
 * -EAGAIN if no device has been enumerated yet.
 */
static int ice40_bus_suspend(struct usb_hcd *hcd)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);

	trace_ice40_bus_suspend(0); /* start */

	/* This happens only during debugging */
	if (!ihcd->devnum) {
		pr_debug("device still not connected. abort suspend\n");
		trace_ice40_bus_suspend(2); /* failure */
		return -EAGAIN;
	}

	/*
	 * Stop sending the SOFs on downstream port. The device
	 * finds the bus idle and enter suspend. The device
	 * takes ~3 msec to enter suspend.
	 */
	ihcd->ctrl0 &= ~SOFEN_CTRL;
	ice40_spi_reg_write(ihcd, ihcd->ctrl0, CTRL0_REG);
	usleep_range(4500, 5000);

	/*
	 * Power collapse the bridge chip to avoid the leakage
	 * current.
	 */
	ice40_spi_power_off(ihcd);
	trace_ice40_bus_suspend(1); /* successful */
	/* Drop the wakeup source taken in ice40_bus_resume() */
	pm_relax(&ihcd->spi->dev);
	return 0;
}
  1058. static int ice40_spi_load_fw(struct ice40_hcd *ihcd);
/*
 * Root-hub resume: re-power the bridge, re-load its configuration
 * image (state was lost in the power collapse), restore the device
 * address, then drive resume signaling and wait for SOFs to restart.
 * Returns 0 on success or a negative errno.
 */
static int ice40_bus_resume(struct usb_hcd *hcd)
{
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);
	u8 ctrl0;
	int ret, i;

	/*
	 * Hold a wakeup source for the duration of the resumed session;
	 * released again from ice40_bus_suspend().
	 * NOTE(review): the failure paths below return while the wakeup
	 * source is still held — confirm this is intentional.
	 */
	pm_stay_awake(&ihcd->spi->dev);
	trace_ice40_bus_resume(0); /* start */

	/*
	 * Power up the bridge chip and load the configuration file.
	 * Re-program the previous settings. For now we need to
	 * update the device address only.
	 */
	for (i = 0; i < 3; i++) {	/* up to 3 firmware-load attempts */
		ret = ice40_spi_load_fw(ihcd);
		if (!ret)
			break;
	}
	if (ret) {
		pr_err("Load firmware failed with ret: %d\n", ret);
		return ret;
	}

	ice40_spi_reg_write(ihcd, ihcd->devnum, FADDR_REG);
	/* Invalidate the cached write-buffer length after the reload */
	ihcd->wblen0 = ~0;

	/*
	 * Program the bridge chip to drive resume signaling. The SOFs
	 * are automatically transmitted after resume completion. It
	 * will take ~20 msec for resume completion.
	 */
	ice40_spi_reg_write(ihcd, ihcd->ctrl0 | RESUME_CTRL, CTRL0_REG);
	usleep_range(20000, 21000);

	/* Wait for the hardware to clear RESUME_CTRL */
	ret = ice40_handshake(ihcd, CTRL0_REG, RESUME_CTRL, 0, 5000);
	if (ret) {
		pr_err("resume failed\n");
		trace_ice40_bus_resume(2); /* failure */
		return -ENODEV;
	}

	ctrl0 = ice40_spi_reg_read(ihcd, CTRL0_REG);
	if (!(ctrl0 & SOFEN_CTRL)) {
		pr_err("SOFs are not transmitted after resume\n");
		trace_ice40_bus_resume(3); /* failure */
		return -ENODEV;
	}

	ihcd->port_flags &= ~USB_PORT_STAT_SUSPEND;
	ihcd->ctrl0 |= SOFEN_CTRL;
	trace_ice40_bus_resume(1); /* success */
	return 0;
}
  1106. static void ice40_set_autosuspend_delay(struct usb_device *dev)
  1107. {
  1108. /*
  1109. * Immediate suspend for root hub and 500 msec auto-suspend
  1110. * timeout for the card.
  1111. */
  1112. if (!dev->parent)
  1113. pm_runtime_set_autosuspend_delay(&dev->dev, 0);
  1114. else
  1115. pm_runtime_set_autosuspend_delay(&dev->dev, 500);
  1116. }
/* hc_driver ops for this SPI-attached full-speed (USB 1.1) controller */
static const struct hc_driver ice40_hc_driver = {
	.description = hcd_name,
	.product_desc = "ICE40 SPI Host Controller",
	.hcd_priv_size = sizeof(struct ice40_hcd *),	/* priv holds a pointer */
	.flags = HCD_USB11,
	/* setup and clean up */
	.reset = ice40_reset,
	.start = ice40_run,
	.stop = ice40_stop,
	/* endpoint and I/O routines */
	.urb_enqueue = ice40_urb_enqueue,
	.urb_dequeue = ice40_urb_dequeue,
	.endpoint_disable = ice40_endpoint_disable,
	/* Root hub operations */
	.hub_status_data = ice40_hub_status_data,
	.hub_control = ice40_hub_control,
	.bus_suspend = ice40_bus_suspend,
	.bus_resume = ice40_bus_resume,
	.set_autosuspend_delay = ice40_set_autosuspend_delay,
};
  1137. static int ice40_spi_parse_dt(struct ice40_hcd *ihcd)
  1138. {
  1139. struct device_node *node = ihcd->spi->dev.of_node;
  1140. int ret = 0;
  1141. if (!node) {
  1142. pr_err("device specific info missing\n");
  1143. ret = -ENODEV;
  1144. goto out;
  1145. }
  1146. ihcd->reset_gpio = of_get_named_gpio(node, "lattice,reset-gpio", 0);
  1147. if (ihcd->reset_gpio < 0) {
  1148. pr_err("reset gpio is missing\n");
  1149. ret = ihcd->reset_gpio;
  1150. goto out;
  1151. }
  1152. ihcd->slave_select_gpio = of_get_named_gpio(node,
  1153. "lattice,slave-select-gpio", 0);
  1154. if (ihcd->slave_select_gpio < 0) {
  1155. pr_err("slave select gpio is missing\n");
  1156. ret = ihcd->slave_select_gpio;
  1157. goto out;
  1158. }
  1159. ihcd->config_done_gpio = of_get_named_gpio(node,
  1160. "lattice,config-done-gpio", 0);
  1161. if (ihcd->config_done_gpio < 0) {
  1162. pr_err("config done gpio is missing\n");
  1163. ret = ihcd->config_done_gpio;
  1164. goto out;
  1165. }
  1166. ihcd->vcc_en_gpio = of_get_named_gpio(node, "lattice,vcc-en-gpio", 0);
  1167. if (ihcd->vcc_en_gpio < 0) {
  1168. pr_err("vcc enable gpio is missing\n");
  1169. ret = ihcd->vcc_en_gpio;
  1170. goto out;
  1171. }
  1172. /*
  1173. * When clk-en-gpio is present, it is used to enable the 19.2 MHz
  1174. * clock from MSM to the bridge chip. Otherwise on-board clock
  1175. * is used.
  1176. */
  1177. ihcd->clk_en_gpio = of_get_named_gpio(node, "lattice,clk-en-gpio", 0);
  1178. if (ihcd->clk_en_gpio < 0)
  1179. ihcd->clk_en_gpio = 0;
  1180. out:
  1181. return ret;
  1182. }
/*
 * Power-collapse the bridge chip. Idempotent: tracked via ihcd->powered.
 * The sequence (vcc_en first, then core, spi, gpio rails, then clock) is
 * the reverse of ice40_spi_power_up() — keep the order.
 */
static void ice40_spi_power_off(struct ice40_hcd *ihcd)
{
	if (!ihcd->powered)
		return;

	gpio_direction_output(ihcd->vcc_en_gpio, 0);
	regulator_disable(ihcd->core_vcc);
	regulator_disable(ihcd->spi_vcc);
	if (ihcd->gpio_vcc)		/* optional supply */
		regulator_disable(ihcd->gpio_vcc);
	if (ihcd->clk_en_gpio)		/* optional external clock */
		gpio_direction_output(ihcd->clk_en_gpio, 0);
	ihcd->powered = false;
}
  1196. static int ice40_spi_power_up(struct ice40_hcd *ihcd)
  1197. {
  1198. int ret;
  1199. if (ihcd->clk_en_gpio) {
  1200. ret = gpio_direction_output(ihcd->clk_en_gpio, 1);
  1201. if (ret < 0) {
  1202. pr_err("fail to enabel clk %d\n", ret);
  1203. goto out;
  1204. }
  1205. }
  1206. if (ihcd->gpio_vcc) {
  1207. ret = regulator_enable(ihcd->gpio_vcc); /* 1.8 V */
  1208. if (ret < 0) {
  1209. pr_err("fail to enable gpio vcc\n");
  1210. goto disable_clk;
  1211. }
  1212. }
  1213. ret = regulator_enable(ihcd->spi_vcc); /* 1.8 V */
  1214. if (ret < 0) {
  1215. pr_err("fail to enable spi vcc\n");
  1216. goto disable_gpio_vcc;
  1217. }
  1218. ret = regulator_enable(ihcd->core_vcc); /* 1.2 V */
  1219. if (ret < 0) {
  1220. pr_err("fail to enable core vcc\n");
  1221. goto disable_spi_vcc;
  1222. }
  1223. ret = gpio_direction_output(ihcd->vcc_en_gpio, 1);
  1224. if (ret < 0) {
  1225. pr_err("fail to assert vcc gpio\n");
  1226. goto disable_core_vcc;
  1227. }
  1228. ihcd->powered = true;
  1229. return 0;
  1230. disable_core_vcc:
  1231. regulator_disable(ihcd->core_vcc);
  1232. disable_spi_vcc:
  1233. regulator_disable(ihcd->spi_vcc);
  1234. disable_gpio_vcc:
  1235. if (ihcd->gpio_vcc)
  1236. regulator_disable(ihcd->gpio_vcc);
  1237. disable_clk:
  1238. if (ihcd->clk_en_gpio)
  1239. gpio_direction_output(ihcd->clk_en_gpio, 0);
  1240. out:
  1241. return ret;
  1242. }
/*
 * Pinmux override used during configuration load: drives the bridge's
 * chip-select line low as a plain GPIO so the chip samples SS low at
 * power-up and enters SPI slave (configuration) mode.
 */
static struct gpiomux_setting slave_select_setting = {
	.func = GPIOMUX_FUNC_GPIO,
	.drv = GPIOMUX_DRV_2MA,
	.pull = GPIOMUX_PULL_NONE,
	.dir = GPIOMUX_OUT_LOW,
};
/*
 * Fetch the FPGA configuration image once and cache it in a DMA-capable
 * devm buffer wired to fmsg_xfr[0]; ice40_spi_load_fw() replays it on
 * every power-up. Returns 0 on success or a negative errno.
 */
static int ice40_spi_cache_fw(struct ice40_hcd *ihcd)
{
	const struct firmware *fw;
	void *buf;
	size_t buf_len;
	int ret;

	ret = request_firmware(&fw, fw_name, &ihcd->spi->dev);
	if (ret < 0) {
		pr_err("fail to get the firmware\n");
		goto out;
	}

	pr_debug("received firmware size = %zu\n", fw->size);

	/*
	 * The bridge expects additional clock cycles after
	 * receiving the configuration data. We don't have a
	 * direct control over SPI clock. Add extra bytes
	 * to the configuration data.
	 */
	buf_len = fw->size + 16;
	buf = devm_kzalloc(&ihcd->spi->dev, buf_len, GFP_KERNEL);
	if (!buf) {
		pr_err("fail to allocate firmware buffer\n");
		ret = -ENOMEM;
		goto release;
	}

	/*
	 * The firmware buffer can not be used for DMA as it
	 * is not physically contiguous. We copy the data
	 * in kmalloc buffer. This buffer will be freed only
	 * during unbind or rmmod.
	 */
	memcpy(buf, fw->data, fw->size);
	release_firmware(fw);

	/*
	 * The bridge supports only 25 MHz during configuration
	 * file loading.
	 */
	ihcd->fmsg_xfr[0].tx_buf = buf;
	ihcd->fmsg_xfr[0].len = buf_len;
	ihcd->fmsg_xfr[0].speed_hz = 25000000;
	return 0;

release:
	release_firmware(fw);
out:
	return ret;
}
  1295. static int ice40_spi_load_fw(struct ice40_hcd *ihcd)
  1296. {
  1297. int ret, i;
  1298. struct gpiomux_setting active_old_setting, suspend_old_setting;
  1299. ret = gpio_direction_output(ihcd->reset_gpio, 0);
  1300. if (ret < 0) {
  1301. pr_err("fail to assert reset %d\n", ret);
  1302. goto out;
  1303. }
  1304. ret = gpio_direction_output(ihcd->vcc_en_gpio, 0);
  1305. if (ret < 0) {
  1306. pr_err("fail to de-assert vcc_en gpio %d\n", ret);
  1307. goto out;
  1308. }
  1309. /*
  1310. * The bridge chip samples the chip select signal during
  1311. * power-up. If it is low, it enters SPI slave mode and
  1312. * accepts the configuration data from us. The chip
  1313. * select signal is managed by the SPI controller driver.
  1314. * We temporarily override the chip select config to
  1315. * drive it low. The SPI bus needs to be locked down during
  1316. * this period to avoid other slave data going to our
  1317. * bridge chip. Disable the SPI runtime suspend for exclusive
  1318. * chip select access.
  1319. */
  1320. pm_runtime_get_sync(ihcd->spi->master->dev.parent);
  1321. spi_bus_lock(ihcd->spi->master);
  1322. ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
  1323. &slave_select_setting, &suspend_old_setting);
  1324. if (ret < 0) {
  1325. pr_err("fail to override suspend setting and select slave %d\n",
  1326. ret);
  1327. spi_bus_unlock(ihcd->spi->master);
  1328. pm_runtime_put_noidle(ihcd->spi->master->dev.parent);
  1329. goto out;
  1330. }
  1331. ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_ACTIVE,
  1332. &slave_select_setting, &active_old_setting);
  1333. if (ret < 0) {
  1334. pr_err("fail to override active setting and select slave %d\n",
  1335. ret);
  1336. spi_bus_unlock(ihcd->spi->master);
  1337. pm_runtime_put_noidle(ihcd->spi->master->dev.parent);
  1338. goto out;
  1339. }
  1340. ret = ice40_spi_power_up(ihcd);
  1341. if (ret < 0) {
  1342. pr_err("fail to power up the chip\n");
  1343. spi_bus_unlock(ihcd->spi->master);
  1344. pm_runtime_put_noidle(ihcd->spi->master->dev.parent);
  1345. goto out;
  1346. }
  1347. /*
  1348. * The databook says 1200 usec is required before the
  1349. * chip becomes ready for the SPI transfer.
  1350. */
  1351. usleep_range(1200, 1250);
  1352. ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_SUSPENDED,
  1353. &suspend_old_setting, NULL);
  1354. if (ret < 0) {
  1355. pr_err("fail to rewrite suspend setting %d\n", ret);
  1356. spi_bus_unlock(ihcd->spi->master);
  1357. pm_runtime_put_noidle(ihcd->spi->master->dev.parent);
  1358. goto power_off;
  1359. }
  1360. ret = msm_gpiomux_write(ihcd->slave_select_gpio, GPIOMUX_ACTIVE,
  1361. &active_old_setting, NULL);
  1362. if (ret < 0) {
  1363. pr_err("fail to rewrite active setting %d\n", ret);
  1364. spi_bus_unlock(ihcd->spi->master);
  1365. pm_runtime_put_noidle(ihcd->spi->master->dev.parent);
  1366. goto power_off;
  1367. }
  1368. pm_runtime_put_noidle(ihcd->spi->master->dev.parent);
  1369. ret = spi_sync_locked(ihcd->spi, ihcd->fmsg);
  1370. spi_bus_unlock(ihcd->spi->master);
  1371. if (ret < 0) {
  1372. pr_err("spi write failed\n");
  1373. goto power_off;
  1374. }
  1375. for (i = 0; i < 1000; i++) {
  1376. ret = gpio_get_value(ihcd->config_done_gpio);
  1377. if (ret) {
  1378. pr_debug("config done asserted %d\n", i);
  1379. break;
  1380. }
  1381. udelay(1);
  1382. }
  1383. if (ret <= 0) {
  1384. pr_err("config done not asserted\n");
  1385. ret = -ENODEV;
  1386. goto power_off;
  1387. }
  1388. ret = gpio_direction_output(ihcd->reset_gpio, 1);
  1389. if (ret < 0) {
  1390. pr_err("fail to assert reset %d\n", ret);
  1391. goto power_off;
  1392. }
  1393. udelay(50);
  1394. ret = ice40_spi_reg_read(ihcd, XFRST_REG);
  1395. pr_debug("XFRST val is %x\n", ret);
  1396. if (!(ret & PLLOK)) {
  1397. pr_err("The PLL2 is not synchronized\n");
  1398. goto power_off;
  1399. }
  1400. pr_info("Firmware load success\n");
  1401. return 0;
  1402. power_off:
  1403. ice40_spi_power_off(ihcd);
  1404. out:
  1405. return ret;
  1406. }
  1407. static int ice40_spi_init_regulators(struct ice40_hcd *ihcd)
  1408. {
  1409. int ret;
  1410. ihcd->spi_vcc = devm_regulator_get(&ihcd->spi->dev, "spi-vcc");
  1411. if (IS_ERR(ihcd->spi_vcc)) {
  1412. ret = PTR_ERR(ihcd->spi_vcc);
  1413. if (ret != -EPROBE_DEFER)
  1414. pr_err("fail to get spi-vcc %d\n", ret);
  1415. goto out;
  1416. }
  1417. ret = regulator_set_voltage(ihcd->spi_vcc, 1800000, 1800000);
  1418. if (ret < 0) {
  1419. pr_err("fail to set spi-vcc %d\n", ret);
  1420. goto out;
  1421. }
  1422. ihcd->core_vcc = devm_regulator_get(&ihcd->spi->dev, "core-vcc");
  1423. if (IS_ERR(ihcd->core_vcc)) {
  1424. ret = PTR_ERR(ihcd->core_vcc);
  1425. if (ret != -EPROBE_DEFER)
  1426. pr_err("fail to get core-vcc %d\n", ret);
  1427. goto out;
  1428. }
  1429. ret = regulator_set_voltage(ihcd->core_vcc, 1200000, 1200000);
  1430. if (ret < 0) {
  1431. pr_err("fail to set core-vcc %d\n", ret);
  1432. goto out;
  1433. }
  1434. if (!of_get_property(ihcd->spi->dev.of_node, "gpio-supply", NULL))
  1435. goto out;
  1436. ihcd->gpio_vcc = devm_regulator_get(&ihcd->spi->dev, "gpio");
  1437. if (IS_ERR(ihcd->gpio_vcc)) {
  1438. ret = PTR_ERR(ihcd->gpio_vcc);
  1439. if (ret != -EPROBE_DEFER)
  1440. pr_err("fail to get gpio_vcc %d\n", ret);
  1441. goto out;
  1442. }
  1443. ret = regulator_set_voltage(ihcd->gpio_vcc, 1800000, 1800000);
  1444. if (ret < 0) {
  1445. pr_err("fail to set gpio_vcc %d\n", ret);
  1446. goto out;
  1447. }
  1448. out:
  1449. return ret;
  1450. }
  1451. static int ice40_spi_request_gpios(struct ice40_hcd *ihcd)
  1452. {
  1453. int ret;
  1454. ret = devm_gpio_request(&ihcd->spi->dev, ihcd->reset_gpio,
  1455. "ice40_reset");
  1456. if (ret < 0) {
  1457. pr_err("fail to request reset gpio\n");
  1458. goto out;
  1459. }
  1460. ret = devm_gpio_request(&ihcd->spi->dev, ihcd->config_done_gpio,
  1461. "ice40_config_done");
  1462. if (ret < 0) {
  1463. pr_err("fail to request config_done gpio\n");
  1464. goto out;
  1465. }
  1466. ret = devm_gpio_request(&ihcd->spi->dev, ihcd->vcc_en_gpio,
  1467. "ice40_vcc_en");
  1468. if (ret < 0) {
  1469. pr_err("fail to request vcc_en gpio\n");
  1470. goto out;
  1471. }
  1472. if (ihcd->clk_en_gpio) {
  1473. ret = devm_gpio_request(&ihcd->spi->dev, ihcd->clk_en_gpio,
  1474. "ice40_clk_en");
  1475. if (ret < 0)
  1476. pr_err("fail to request clk_en gpio\n");
  1477. }
  1478. out:
  1479. return ret;
  1480. }
  1481. static int
  1482. ice40_spi_init_one_xfr(struct ice40_hcd *ihcd, enum ice40_xfr_type type)
  1483. {
  1484. struct spi_message **m;
  1485. struct spi_transfer **t;
  1486. int n;
  1487. switch (type) {
  1488. case FIRMWARE_XFR:
  1489. m = &ihcd->fmsg;
  1490. t = &ihcd->fmsg_xfr;
  1491. n = 1;
  1492. break;
  1493. case REG_WRITE_XFR:
  1494. m = &ihcd->wmsg;
  1495. t = &ihcd->wmsg_xfr;
  1496. n = 1;
  1497. break;
  1498. case REG_READ_XFR:
  1499. m = &ihcd->rmsg;
  1500. t = &ihcd->rmsg_xfr;
  1501. n = 1;
  1502. break;
  1503. case SETUP_XFR:
  1504. m = &ihcd->setup_msg;
  1505. t = &ihcd->setup_xfr;
  1506. n = 2;
  1507. break;
  1508. case DATA_IN_XFR:
  1509. m = &ihcd->in_msg;
  1510. t = &ihcd->in_xfr;
  1511. n = 2;
  1512. break;
  1513. case DATA_OUT_XFR:
  1514. m = &ihcd->out_msg;
  1515. t = &ihcd->out_xfr;
  1516. n = 2;
  1517. break;
  1518. default:
  1519. return -EINVAL;
  1520. }
  1521. *m = devm_kzalloc(&ihcd->spi->dev, sizeof(**m), GFP_KERNEL);
  1522. if (*m == NULL)
  1523. goto out;
  1524. *t = devm_kzalloc(&ihcd->spi->dev, n * sizeof(**t), GFP_KERNEL);
  1525. if (*t == NULL)
  1526. goto out;
  1527. spi_message_init_with_transfers(*m, *t, n);
  1528. return 0;
  1529. out:
  1530. return -ENOMEM;
  1531. }
/*
 * Pre-allocate every SPI message/transfer pair and the small fixed-size
 * DMA-safe bounce buffers used by the register and endpoint transfer
 * paths. Buffer sizes match the bridge's wire protocol:
 *   reg write: 2 bytes (addr + data), reg read: 3 (addr + turnaround +
 *   data), setup/out: 1-byte command header, in: 2-byte command header.
 * All allocations are devm-managed. Returns 0 or -ENOMEM.
 */
static int ice40_spi_init_xfrs(struct ice40_hcd *ihcd)
{
	int ret = -ENOMEM;

	ret = ice40_spi_init_one_xfr(ihcd, FIRMWARE_XFR);
	if (ret < 0)
		goto out;

	ret = ice40_spi_init_one_xfr(ihcd, REG_WRITE_XFR);
	if (ret < 0)
		goto out;

	ihcd->w_tx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
	if (!ihcd->w_tx_buf)
		goto out;

	ihcd->w_rx_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
	if (!ihcd->w_rx_buf)
		goto out;

	ihcd->wmsg_xfr[0].tx_buf = ihcd->w_tx_buf;
	ihcd->wmsg_xfr[0].rx_buf = ihcd->w_rx_buf;
	ihcd->wmsg_xfr[0].len = 2;

	ret = ice40_spi_init_one_xfr(ihcd, REG_READ_XFR);
	if (ret < 0)
		goto out;

	ihcd->r_tx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
	if (!ihcd->r_tx_buf)
		goto out;

	ihcd->r_rx_buf = devm_kzalloc(&ihcd->spi->dev, 3, GFP_KERNEL);
	if (!ihcd->r_rx_buf)
		goto out;

	ihcd->rmsg_xfr[0].tx_buf = ihcd->r_tx_buf;
	ihcd->rmsg_xfr[0].rx_buf = ihcd->r_rx_buf;
	ihcd->rmsg_xfr[0].len = 3;

	ret = ice40_spi_init_one_xfr(ihcd, SETUP_XFR);
	if (ret < 0)
		goto out;

	ihcd->setup_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
	if (!ihcd->setup_buf)
		goto out;

	ihcd->setup_xfr[0].tx_buf = ihcd->setup_buf;
	ihcd->setup_xfr[0].len = 1;

	ret = ice40_spi_init_one_xfr(ihcd, DATA_IN_XFR);
	if (ret < 0)
		goto out;

	ihcd->in_buf = devm_kzalloc(&ihcd->spi->dev, 2, GFP_KERNEL);
	if (!ihcd->in_buf)
		goto out;

	ihcd->in_xfr[0].tx_buf = ihcd->in_buf;
	ihcd->in_xfr[0].len = 2;

	ret = ice40_spi_init_one_xfr(ihcd, DATA_OUT_XFR);
	if (ret < 0)
		goto out;

	ihcd->out_buf = devm_kzalloc(&ihcd->spi->dev, 1, GFP_KERNEL);
	if (!ihcd->out_buf)
		goto out;

	ihcd->out_xfr[0].tx_buf = ihcd->out_buf;
	ihcd->out_xfr[0].len = 1;

	return 0;
out:
	/* devm allocations are released automatically on probe failure */
	return -ENOMEM;
}
/* debugfs open: stash the ihcd pointer (i_private) in the seq_file */
static int ice40_dbg_cmd_open(struct inode *inode, struct file *file)
{
	return single_open(file, NULL, inode->i_private);
}
  1594. static ssize_t ice40_dbg_cmd_write(struct file *file, const char __user *ubuf,
  1595. size_t count, loff_t *ppos)
  1596. {
  1597. struct seq_file *s = file->private_data;
  1598. struct ice40_hcd *ihcd = s->private;
  1599. char buf[32];
  1600. int ret;
  1601. u8 status, addr;
  1602. memset(buf, 0x00, sizeof(buf));
  1603. if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) {
  1604. ret = -EFAULT;
  1605. goto out;
  1606. }
  1607. if (!strcmp(buf, "poll")) {
  1608. if (!HCD_RH_RUNNING(ihcd->hcd)) {
  1609. ret = -EAGAIN;
  1610. goto out;
  1611. }
  1612. /*
  1613. * The bridge chip supports interrupt for device
  1614. * connect and disconnect. We don;t have a real
  1615. * use case of connect/disconnect. This debugfs
  1616. * interface provides a way to enumerate the
  1617. * attached device.
  1618. */
  1619. ice40_spi_reg_write(ihcd, ihcd->ctrl0 |
  1620. DET_BUS_CTRL, CTRL0_REG);
  1621. ice40_handshake(ihcd, CTRL0_REG, DET_BUS_CTRL, 0, 5000);
  1622. status = ice40_spi_reg_read(ihcd, XFRST_REG);
  1623. if ((status & DPST)) {
  1624. ihcd->port_flags |= USB_PORT_STAT_CONNECTION;
  1625. ihcd->port_flags |= USB_PORT_STAT_C_CONNECTION << 16;
  1626. ihcd->pcd_pending = true;
  1627. usb_hcd_poll_rh_status(ihcd->hcd);
  1628. } else if (ihcd->port_flags & USB_PORT_STAT_CONNECTION) {
  1629. ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
  1630. ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
  1631. ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
  1632. ihcd->pcd_pending = true;
  1633. usb_hcd_poll_rh_status(ihcd->hcd);
  1634. }
  1635. } else if (!strcmp(buf, "rwtest")) {
  1636. ihcd->devnum = 1;
  1637. ice40_spi_reg_write(ihcd, 0x1, FADDR_REG);
  1638. addr = ice40_spi_reg_read(ihcd, FADDR_REG);
  1639. pr_info("addr written was 0x1 read as %x\n", addr);
  1640. } else if (!strcmp(buf, "force_disconnect")) {
  1641. if (!HCD_RH_RUNNING(ihcd->hcd)) {
  1642. ret = -EAGAIN;
  1643. goto out;
  1644. }
  1645. /*
  1646. * Forcfully disconnect the device. This is required
  1647. * for simulating the disconnect on a USB port which
  1648. * does not have pull-down resistors.
  1649. */
  1650. ihcd->port_flags &= ~USB_PORT_STAT_ENABLE;
  1651. ihcd->port_flags &= ~USB_PORT_STAT_CONNECTION;
  1652. ihcd->port_flags |= (USB_PORT_STAT_C_CONNECTION << 16);
  1653. ihcd->pcd_pending = true;
  1654. usb_hcd_poll_rh_status(ihcd->hcd);
  1655. } else if (!strcmp(buf, "config_test")) {
  1656. ice40_spi_power_off(ihcd);
  1657. ret = ice40_spi_load_fw(ihcd);
  1658. if (ret) {
  1659. pr_err("config load failed\n");
  1660. goto out;
  1661. }
  1662. } else {
  1663. ret = -EINVAL;
  1664. goto out;
  1665. }
  1666. ret = count;
  1667. out:
  1668. return ret;
  1669. }
/* write-only debugfs "command" file; single_open carries the ihcd */
const struct file_operations ice40_dbg_cmd_ops = {
	.open = ice40_dbg_cmd_open,
	.write = ice40_dbg_cmd_write,
	.llseek = seq_lseek,
	.release = single_release,
};
  1676. static int ice40_debugfs_init(struct ice40_hcd *ihcd)
  1677. {
  1678. struct dentry *dir;
  1679. int ret = 0;
  1680. dir = debugfs_create_dir("ice40_hcd", NULL);
  1681. if (!dir || IS_ERR(dir)) {
  1682. ret = -ENODEV;
  1683. goto out;
  1684. }
  1685. ihcd->dbg_root = dir;
  1686. dir = debugfs_create_file("command", S_IWUSR, ihcd->dbg_root, ihcd,
  1687. &ice40_dbg_cmd_ops);
  1688. if (!dir) {
  1689. debugfs_remove_recursive(ihcd->dbg_root);
  1690. ihcd->dbg_root = NULL;
  1691. ret = -ENODEV;
  1692. }
  1693. out:
  1694. return ret;
  1695. }
  1696. static int ice40_spi_probe(struct spi_device *spi)
  1697. {
  1698. struct ice40_hcd *ihcd;
  1699. int ret;
  1700. ihcd = devm_kzalloc(&spi->dev, sizeof(*ihcd), GFP_KERNEL);
  1701. if (!ihcd) {
  1702. pr_err("fail to allocate ihcd\n");
  1703. ret = -ENOMEM;
  1704. goto out;
  1705. }
  1706. ihcd->spi = spi;
  1707. ret = ice40_spi_parse_dt(ihcd);
  1708. if (ret) {
  1709. pr_err("fail to parse dt node\n");
  1710. goto out;
  1711. }
  1712. ret = ice40_spi_init_regulators(ihcd);
  1713. if (ret) {
  1714. pr_err("fail to init regulators\n");
  1715. goto out;
  1716. }
  1717. ret = ice40_spi_request_gpios(ihcd);
  1718. if (ret) {
  1719. pr_err("fail to request gpios\n");
  1720. goto out;
  1721. }
  1722. spin_lock_init(&ihcd->lock);
  1723. INIT_LIST_HEAD(&ihcd->async_list);
  1724. INIT_WORK(&ihcd->async_work, ice40_async_work);
  1725. mutex_init(&ihcd->wlock);
  1726. mutex_init(&ihcd->rlock);
  1727. /*
  1728. * Enable all our trace points. Useful in debugging card
  1729. * enumeration issues.
  1730. */
  1731. ret = trace_set_clr_event(__stringify(TRACE_SYSTEM), NULL, 1);
  1732. if (ret < 0)
  1733. pr_err("fail to enable trace points with %d\n", ret);
  1734. ihcd->wq = create_singlethread_workqueue("ice40_wq");
  1735. if (!ihcd->wq) {
  1736. pr_err("fail to create workqueue\n");
  1737. ret = -ENOMEM;
  1738. goto destroy_mutex;
  1739. }
  1740. ret = ice40_spi_init_xfrs(ihcd);
  1741. if (ret) {
  1742. pr_err("fail to init spi xfrs %d\n", ret);
  1743. goto destroy_wq;
  1744. }
  1745. ret = ice40_spi_cache_fw(ihcd);
  1746. if (ret) {
  1747. pr_err("fail to cache fw %d\n", ret);
  1748. goto destroy_wq;
  1749. }
  1750. ret = ice40_spi_load_fw(ihcd);
  1751. if (ret) {
  1752. pr_err("fail to load fw %d\n", ret);
  1753. goto destroy_wq;
  1754. }
  1755. ihcd->hcd = usb_create_hcd(&ice40_hc_driver, &spi->dev, "ice40");
  1756. if (!ihcd->hcd) {
  1757. pr_err("fail to alloc hcd\n");
  1758. ret = -ENOMEM;
  1759. goto power_off;
  1760. }
  1761. *((struct ice40_hcd **) ihcd->hcd->hcd_priv) = ihcd;
  1762. ret = usb_add_hcd(ihcd->hcd, 0, 0);
  1763. if (ret < 0) {
  1764. pr_err("fail to add HCD\n");
  1765. goto put_hcd;
  1766. }
  1767. ice40_debugfs_init(ihcd);
  1768. /*
  1769. * We manage the power states of the bridge chip
  1770. * as part of root hub suspend/resume. We don't
  1771. * need to implement any additional runtime PM
  1772. * methods.
  1773. */
  1774. pm_runtime_no_callbacks(&spi->dev);
  1775. pm_runtime_set_active(&spi->dev);
  1776. pm_runtime_enable(&spi->dev);
  1777. /*
  1778. * This does not mean bridge chip can wakeup the
  1779. * system from sleep. It's activity can prevent
  1780. * or abort the system sleep. The device_init_wakeup
  1781. * creates the wakeup source for us which we will
  1782. * use to control system sleep.
  1783. */
  1784. device_init_wakeup(&spi->dev, 1);
  1785. pm_stay_awake(&spi->dev);
  1786. pr_debug("success\n");
  1787. return 0;
  1788. put_hcd:
  1789. usb_put_hcd(ihcd->hcd);
  1790. power_off:
  1791. ice40_spi_power_off(ihcd);
  1792. destroy_wq:
  1793. destroy_workqueue(ihcd->wq);
  1794. destroy_mutex:
  1795. mutex_destroy(&ihcd->rlock);
  1796. mutex_destroy(&ihcd->wlock);
  1797. out:
  1798. pr_info("ice40_spi_probe failed\n");
  1799. return ret;
  1800. }
/*
 * Unbind path: mirror ice40_spi_probe() in reverse — drop debugfs,
 * deregister and release the HCD, stop the work queue, power the
 * bridge down, then undo the runtime-PM and wakeup-source setup.
 * Always returns 0 (the spi core ignores nonzero here anyway).
 */
static int ice40_spi_remove(struct spi_device *spi)
{
	/* set in probe via spi_set_drvdata(); NULL would oops below */
	struct usb_hcd *hcd = spi_get_drvdata(spi);
	struct ice40_hcd *ihcd = hcd_to_ihcd(hcd);

	debugfs_remove_recursive(ihcd->dbg_root);
	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
	/* flushes any queued ice40_async_work before destruction */
	destroy_workqueue(ihcd->wq);
	ice40_spi_power_off(ihcd);
	pm_runtime_disable(&spi->dev);
	/* balances the pm_stay_awake() taken in probe */
	pm_relax(&spi->dev);
	return 0;
}
  1814. static struct of_device_id ice40_spi_of_match_table[] = {
  1815. { .compatible = "lattice,ice40-spi-usb", },
  1816. {},
  1817. };
  1818. static struct spi_driver ice40_spi_driver = {
  1819. .driver = {
  1820. .name = "ice40_spi",
  1821. .owner = THIS_MODULE,
  1822. .of_match_table = ice40_spi_of_match_table,
  1823. },
  1824. .probe = ice40_spi_probe,
  1825. .remove = ice40_spi_remove,
  1826. };
  1827. module_spi_driver(ice40_spi_driver);
  1828. MODULE_DESCRIPTION("ICE40 FPGA based SPI-USB bridge HCD");
  1829. MODULE_LICENSE("GPL v2");