/* qib_sd7220.c - QLogic IB 7220 SerDes support */
  1. /*
  2. * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
  3. * All rights reserved.
  4. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. */
  34. /*
  35. * This file contains all of the code that is specific to the SerDes
  36. * on the QLogic_IB 7220 chip.
  37. */
  38. #include <linux/pci.h>
  39. #include <linux/delay.h>
  40. #include <linux/firmware.h>
  41. #include "qib.h"
  42. #include "qib_7220.h"
  43. #define SD7220_FW_NAME "qlogic/sd7220.fw"
  44. MODULE_FIRMWARE(SD7220_FW_NAME);
  45. /*
  46. * Same as in qib_iba7220.c, but just the registers needed here.
  47. * Could move whole set to qib_7220.h, but decided better to keep
  48. * local.
  49. */
  50. #define KREG_IDX(regname) (QIB_7220_##regname##_OFFS / sizeof(u64))
  51. #define kr_hwerrclear KREG_IDX(HwErrClear)
  52. #define kr_hwerrmask KREG_IDX(HwErrMask)
  53. #define kr_hwerrstatus KREG_IDX(HwErrStatus)
  54. #define kr_ibcstatus KREG_IDX(IBCStatus)
  55. #define kr_ibserdesctrl KREG_IDX(IBSerDesCtrl)
  56. #define kr_scratch KREG_IDX(Scratch)
  57. #define kr_xgxs_cfg KREG_IDX(XGXSCfg)
  58. /* these are used only here, not in qib_iba7220.c */
  59. #define kr_ibsd_epb_access_ctrl KREG_IDX(ibsd_epb_access_ctrl)
  60. #define kr_ibsd_epb_transaction_reg KREG_IDX(ibsd_epb_transaction_reg)
  61. #define kr_pciesd_epb_transaction_reg KREG_IDX(pciesd_epb_transaction_reg)
  62. #define kr_pciesd_epb_access_ctrl KREG_IDX(pciesd_epb_access_ctrl)
  63. #define kr_serdes_ddsrxeq0 KREG_IDX(SerDes_DDSRXEQ0)
  64. /*
  65. * The IBSerDesMappTable is a memory that holds values to be stored in
  66. * various SerDes registers by IBC.
  67. */
  68. #define kr_serdes_maptable KREG_IDX(IBSerDesMappTable)
  69. /*
  70. * Below used for sdnum parameter, selecting one of the two sections
  71. * used for PCIe, or the single SerDes used for IB.
  72. */
  73. #define PCIE_SERDES0 0
  74. #define PCIE_SERDES1 1
  75. /*
  76. * The EPB requires addressing in a particular form. EPB_LOC() is intended
  77. * to make #definitions a little more readable.
  78. */
  79. #define EPB_ADDR_SHF 8
  80. #define EPB_LOC(chn, elt, reg) \
  81. (((elt & 0xf) | ((chn & 7) << 4) | ((reg & 0x3f) << 9)) << \
  82. EPB_ADDR_SHF)
  83. #define EPB_IB_QUAD0_CS_SHF (25)
  84. #define EPB_IB_QUAD0_CS (1U << EPB_IB_QUAD0_CS_SHF)
  85. #define EPB_IB_UC_CS_SHF (26)
  86. #define EPB_PCIE_UC_CS_SHF (27)
  87. #define EPB_GLOBAL_WR (1U << (EPB_ADDR_SHF + 8))
  88. /* Forward declarations. */
  89. static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
  90. u32 data, u32 mask);
  91. static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
  92. int mask);
  93. static int qib_sd_trimdone_poll(struct qib_devdata *dd);
  94. static void qib_sd_trimdone_monitor(struct qib_devdata *dd, const char *where);
  95. static int qib_sd_setvals(struct qib_devdata *dd);
  96. static int qib_sd_early(struct qib_devdata *dd);
  97. static int qib_sd_dactrim(struct qib_devdata *dd);
  98. static int qib_internal_presets(struct qib_devdata *dd);
  99. /* Tweak the register (CMUCTRL5) that contains the TRIMSELF controls */
  100. static int qib_sd_trimself(struct qib_devdata *dd, int val);
  101. static int epb_access(struct qib_devdata *dd, int sdnum, int claim);
  102. static int qib_sd7220_ib_load(struct qib_devdata *dd,
  103. const struct firmware *fw);
  104. static int qib_sd7220_ib_vfy(struct qib_devdata *dd,
  105. const struct firmware *fw);
  106. /*
  107. * Below keeps track of whether the "once per power-on" initialization has
  108. * been done, because uC code Version 1.32.17 or higher allows the uC to
  109. * be reset at will, and Automatic Equalization may require it. So the
  110. * state of the reset "pin", is no longer valid. Instead, we check for the
  111. * actual uC code having been loaded.
  112. */
  113. static int qib_ibsd_ucode_loaded(struct qib_pportdata *ppd,
  114. const struct firmware *fw)
  115. {
  116. struct qib_devdata *dd = ppd->dd;
  117. if (!dd->cspec->serdes_first_init_done &&
  118. qib_sd7220_ib_vfy(dd, fw) > 0)
  119. dd->cspec->serdes_first_init_done = 1;
  120. return dd->cspec->serdes_first_init_done;
  121. }
  122. /* repeat #define for local use. "Real" #define is in qib_iba7220.c */
  123. #define QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR 0x0000004000000000ULL
  124. #define IB_MPREG5 (EPB_LOC(6, 0, 0xE) | (1L << EPB_IB_UC_CS_SHF))
  125. #define IB_MPREG6 (EPB_LOC(6, 0, 0xF) | (1U << EPB_IB_UC_CS_SHF))
  126. #define UC_PAR_CLR_D 8
  127. #define UC_PAR_CLR_M 0xC
  128. #define IB_CTRL2(chn) (EPB_LOC(chn, 7, 3) | EPB_IB_QUAD0_CS)
  129. #define START_EQ1(chan) EPB_LOC(chan, 7, 0x27)
/*
 * qib_sd7220_clr_ibpar - clear and re-arm the IB SerDes uC parity error
 * @dd: the qlogic_ib device
 *
 * Sets the uC parity-clear bits in IB_MPREG6, drops them again, then
 * clears the latched chip-level parity status so new errors can be seen.
 */
void qib_sd7220_clr_ibpar(struct qib_devdata *dd)
{
	int ret;

	/* clear, then re-enable parity errs */
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6,
				 UC_PAR_CLR_D, UC_PAR_CLR_M);
	if (ret < 0) {
		qib_dev_err(dd, "Failed clearing IBSerDes Parity err\n");
		goto bail;
	}
	/* drop the clear bits again, leaving parity detection re-armed */
	ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0,
				 UC_PAR_CLR_M);
	/* flush via scratch read, then give the SerDes time to act */
	qib_read_kreg32(dd, kr_scratch);
	udelay(4);
	/* clear the latched chip-level parity-error status */
	qib_write_kreg(dd, kr_hwerrclear,
		       QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
	qib_read_kreg32(dd, kr_scratch);
bail:
	return;
}
  150. /*
  151. * After a reset or other unusual event, the epb interface may need
  152. * to be re-synchronized, between the host and the uC.
  153. * returns <0 for failure to resync within IBSD_RESYNC_TRIES (not expected)
  154. */
  155. #define IBSD_RESYNC_TRIES 3
  156. #define IB_PGUDP(chn) (EPB_LOC((chn), 2, 1) | EPB_IB_QUAD0_CS)
  157. #define IB_CMUDONE(chn) (EPB_LOC((chn), 7, 0xF) | EPB_IB_QUAD0_CS)
/*
 * qib_resync_ibepb - re-synchronize the host<->uC EPB interface
 * @dd: the qlogic_ib device
 *
 * Walks channels 0..3; each must pass a write/read-back test of its
 * PGUDP scratch location and show a plausible CMUDONE value before we
 * advance to the next.  A failing step retries the same channel, within
 * an overall budget of 4 * IBSD_RESYNC_TRIES attempts.
 * Returns 0 on success, <0 on failure.
 */
static int qib_resync_ibepb(struct qib_devdata *dd)
{
	int ret, pat, tries, chn;
	u32 loc;

	ret = -1;
	chn = 0;
	for (tries = 0; tries < (4 * IBSD_RESYNC_TRIES); ++tries) {
		loc = IB_PGUDP(chn);
		/* mask==0: pure read of the channel's scratch location */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed read in resync\n");
			continue;
		}
		/* only warn on the first attempt; F0/55 are the expected values */
		if (ret != 0xF0 && ret != 0x55 && tries == 0)
			qib_dev_err(dd, "unexpected pattern in resync\n");
		pat = ret ^ 0xA5; /* alternate F0 and 55 */
		/* full-byte write (mask 0xFF) of the flipped pattern */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, pat, 0xFF);
		if (ret < 0) {
			qib_dev_err(dd, "Failed write in resync\n");
			continue;
		}
		/* read back and verify the pattern survived the round trip */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed re-read in resync\n");
			continue;
		}
		if (ret != pat) {
			qib_dev_err(dd, "Failed compare1 in resync\n");
			continue;
		}
		loc = IB_CMUDONE(chn);
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, 0, 0);
		if (ret < 0) {
			qib_dev_err(dd, "Failed CMUDONE rd in resync\n");
			continue;
		}
		/* expect bit 6 set ("done") with bits 5:4 matching the channel */
		if ((ret & 0x70) != ((chn << 4) | 0x40)) {
			qib_dev_err(dd, "Bad CMUDONE value %02X, chn %d\n",
				    ret, chn);
			continue;
		}
		if (++chn == 4)
			break; /* Success */
	}
	return (ret > 0) ? 0 : ret;
}
  204. /*
  205. * Localize the stuff that should be done to change IB uC reset
  206. * returns <0 for errors.
  207. */
static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
{
	u64 rst_val;
	int ret = 0;
	unsigned long flags;

	rst_val = qib_read_kreg64(dd, kr_ibserdesctrl);
	if (assert_rst) {
		/*
		 * Vendor recommends "interrupting" uC before reset, to
		 * minimize possible glitches.
		 */
		spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
		epb_access(dd, IB_7220_SERDES, 1);
		rst_val |= 1ULL;	/* bit 0 (D0) of IBSerDesCtrl is the reset */
		/* Squelch possible parity error from _asserting_ reset */
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
		/* flush write, delay to ensure it took effect */
		qib_read_kreg32(dd, kr_scratch);
		udelay(2);
		/* once it's reset, can remove interrupt */
		epb_access(dd, IB_7220_SERDES, -1);
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
	} else {
		/*
		 * Before we de-assert reset, we need to deal with
		 * possible glitch on the Parity-error line.
		 * Suppress it around the reset, both in chip-level
		 * hwerrmask and in IB uC control reg. uC will allow
		 * it again during startup.
		 */
		u64 val;

		rst_val &= ~(1ULL);
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask &
			       ~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR);
		/* the EPB may need re-sync after the uC came out of reset */
		ret = qib_resync_ibepb(dd);
		if (ret < 0)
			qib_dev_err(dd, "unable to re-sync IB EPB\n");
		/* set uC control regs to suppress parity errs */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG5, 1, 1);
		if (ret < 0)
			goto bail;
		/* IB uC code past Version 1.32.17 allow suppression of wdog */
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80,
					 0x80);
		if (ret < 0) {
			qib_dev_err(dd, "Failed to set WDOG disable\n");
			goto bail;
		}
		qib_write_kreg(dd, kr_ibserdesctrl, rst_val);
		/* flush write, delay for startup */
		qib_read_kreg32(dd, kr_scratch);
		udelay(1);
		/* clear, then re-enable parity errs */
		qib_sd7220_clr_ibpar(dd);
		val = qib_read_kreg64(dd, kr_hwerrstatus);
		if (val & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR) {
			qib_dev_err(dd, "IBUC Parity still set after RST\n");
			/* parity error is stuck; keep it masked off */
			dd->cspec->hwerrmask &=
				~QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR;
		}
		/* restore (possibly reduced) hwerrmask */
		qib_write_kreg(dd, kr_hwerrmask,
			       dd->cspec->hwerrmask);
	}
bail:
	return ret;
}
  278. static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
  279. const char *where)
  280. {
  281. int ret, chn, baduns;
  282. u64 val;
  283. if (!where)
  284. where = "?";
  285. /* give time for reset to settle out in EPB */
  286. udelay(2);
  287. ret = qib_resync_ibepb(dd);
  288. if (ret < 0)
  289. qib_dev_err(dd, "not able to re-sync IB EPB (%s)\n", where);
  290. /* Do "sacrificial read" to get EPB in sane state after reset */
  291. ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(0), 0, 0);
  292. if (ret < 0)
  293. qib_dev_err(dd, "Failed TRIMDONE 1st read, (%s)\n", where);
  294. /* Check/show "summary" Trim-done bit in IBCStatus */
  295. val = qib_read_kreg64(dd, kr_ibcstatus);
  296. if (!(val & (1ULL << 11)))
  297. qib_dev_err(dd, "IBCS TRIMDONE clear (%s)\n", where);
  298. /*
  299. * Do "dummy read/mod/wr" to get EPB in sane state after reset
  300. * The default value for MPREG6 is 0.
  301. */
  302. udelay(2);
  303. ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_MPREG6, 0x80, 0x80);
  304. if (ret < 0)
  305. qib_dev_err(dd, "Failed Dummy RMW, (%s)\n", where);
  306. udelay(10);
  307. baduns = 0;
  308. for (chn = 3; chn >= 0; --chn) {
  309. /* Read CTRL reg for each channel to check TRIMDONE */
  310. ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
  311. IB_CTRL2(chn), 0, 0);
  312. if (ret < 0)
  313. qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
  314. " (%s)\n", chn, where);
  315. if (!(ret & 0x10)) {
  316. int probe;
  317. baduns |= (1 << chn);
  318. qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
  319. " (%s)\n", chn, ret, where);
  320. probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
  321. IB_PGUDP(0), 0, 0);
  322. qib_dev_err(dd, "probe is %d (%02X)\n",
  323. probe, probe);
  324. probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
  325. IB_CTRL2(chn), 0, 0);
  326. qib_dev_err(dd, "re-read: %d (%02X)\n",
  327. probe, probe);
  328. ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
  329. IB_CTRL2(chn), 0x10, 0x10);
  330. if (ret < 0)
  331. qib_dev_err(dd,
  332. "Err on TRIMDONE rewrite1\n");
  333. }
  334. }
  335. for (chn = 3; chn >= 0; --chn) {
  336. /* Read CTRL reg for each channel to check TRIMDONE */
  337. if (baduns & (1 << chn)) {
  338. qib_dev_err(dd,
  339. "Reseting TRIMDONE on chn %d (%s)\n",
  340. chn, where);
  341. ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
  342. IB_CTRL2(chn), 0x10, 0x10);
  343. if (ret < 0)
  344. qib_dev_err(dd, "Failed re-setting "
  345. "TRIMDONE, chn %d (%s)\n",
  346. chn, where);
  347. }
  348. }
  349. }
  350. /*
  351. * Below is portion of IBA7220-specific bringup_serdes() that actually
  352. * deals with registers and memory within the SerDes itself.
  353. * Post IB uC code version 1.32.17, was_reset being 1 is not really
  354. * informative, so we double-check.
  355. */
int qib_sd7220_init(struct qib_devdata *dd)
{
	const struct firmware *fw;
	int ret = 1; /* default to failure */
	int first_reset, was_reset;

	/* SERDES MPU reset recorded in D0 */
	was_reset = (qib_read_kreg64(dd, kr_ibserdesctrl) & 1);
	if (!was_reset) {
		/* entered with reset not asserted, we need to do it */
		qib_ibsd_reset(dd, 1);
		qib_sd_trimdone_monitor(dd, "Driver-reload");
	}
	/*
	 * NOTE(review): on failure the firmware core leaves fw NULL, so
	 * the unconditional release_firmware(fw) at "done" is a no-op
	 * on this path — confirm against request_firmware() semantics.
	 */
	ret = request_firmware(&fw, SD7220_FW_NAME, &dd->pcidev->dev);
	if (ret) {
		qib_dev_err(dd, "Failed to load IB SERDES image\n");
		goto done;
	}

	/* Substitute our deduced value for was_reset */
	ret = qib_ibsd_ucode_loaded(dd->pport, fw);
	if (ret < 0)
		goto bail;

	first_reset = !ret; /* First reset if IBSD uCode not yet loaded */
	/*
	 * Alter some regs per vendor latest doc, reset-defaults
	 * are not right for IB.
	 */
	ret = qib_sd_early(dd);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES early defaults\n");
		goto bail;
	}
	/*
	 * Set DAC manual trim IB.
	 * We only do this once after chip has been reset (usually
	 * same as once per system boot).
	 */
	if (first_reset) {
		ret = qib_sd_dactrim(dd);
		if (ret < 0) {
			qib_dev_err(dd, "Failed IB SERDES DAC trim\n");
			goto bail;
		}
	}
	/*
	 * Set various registers (DDS and RXEQ) that will be
	 * controlled by IBC (in 1.2 mode) to reasonable preset values
	 * Calling the "internal" version avoids the "check for needed"
	 * and "trimdone monitor" that might be counter-productive.
	 */
	ret = qib_internal_presets(dd);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES presets\n");
		goto bail;
	}
	ret = qib_sd_trimself(dd, 0x80);
	if (ret < 0) {
		qib_dev_err(dd, "Failed to set IB SERDES TRIMSELF\n");
		goto bail;
	}

	/* Load image, then try to verify */
	ret = 0;	/* Assume success */
	if (first_reset) {
		int vfy;
		int trim_done;

		ret = qib_sd7220_ib_load(dd, fw);
		if (ret < 0) {
			qib_dev_err(dd, "Failed to load IB SERDES image\n");
			goto bail;
		} else {
			/* Loaded image, try to verify */
			vfy = qib_sd7220_ib_vfy(dd, fw);
			/* both return the byte count on success */
			if (vfy != ret) {
				qib_dev_err(dd, "SERDES PRAM VFY failed\n");
				goto bail;
			} /* end if verified */
		} /* end if loaded */

		/*
		 * Loaded and verified. Almost good...
		 * hold "success" in ret
		 */
		ret = 0;
		/*
		 * Prev steps all worked, continue bringup
		 * De-assert RESET to uC, only in first reset, to allow
		 * trimming.
		 *
		 * Since our default setup sets START_EQ1 to
		 * PRESET, we need to clear that for this very first run.
		 */
		ret = ibsd_mod_allchnls(dd, START_EQ1(0), 0, 0x38);
		if (ret < 0) {
			qib_dev_err(dd, "Failed clearing START_EQ1\n");
			goto bail;
		}

		qib_ibsd_reset(dd, 0);
		/*
		 * If this is not the first reset, trimdone should be set
		 * already. We may need to check about this.
		 */
		trim_done = qib_sd_trimdone_poll(dd);
		/*
		 * Whether or not trimdone succeeded, we need to put the
		 * uC back into reset to avoid a possible fight with the
		 * IBC state-machine.
		 */
		qib_ibsd_reset(dd, 1);

		if (!trim_done) {
			qib_dev_err(dd, "No TRIMDONE seen\n");
			goto bail;
		}
		/*
		 * DEBUG: check each time we reset if trimdone bits have
		 * gotten cleared, and re-set them.
		 */
		qib_sd_trimdone_monitor(dd, "First-reset");
		/* Remember so we do not re-do the load, dactrim, etc. */
		dd->cspec->serdes_first_init_done = 1;
	}
	/*
	 * setup for channel training and load values for
	 * RxEq and DDS in tables used by IBC in IB1.2 mode
	 */
	ret = 0;
	if (qib_sd_setvals(dd) >= 0)
		goto done;
bail:
	ret = 1;	/* NOTE: failure is positive 1, not a negative errno */
done:
	/* start relock timer regardless, but start at 1 second */
	set_7220_relock_poll(dd, -1);
	release_firmware(fw);
	return ret;
}
  489. #define EPB_ACC_REQ 1
  490. #define EPB_ACC_GNT 0x100
  491. #define EPB_DATA_MASK 0xFF
  492. #define EPB_RD (1ULL << 24)
  493. #define EPB_TRANS_RDY (1ULL << 31)
  494. #define EPB_TRANS_ERR (1ULL << 30)
  495. #define EPB_TRANS_TRIES 5
  496. /*
  497. * query, claim, release ownership of the EPB (External Parallel Bus)
  498. * for a specified SERDES.
  499. * the "claim" parameter is >0 to claim, <0 to release, 0 to query.
  500. * Returns <0 for errors, >0 if we had ownership, else 0.
  501. */
  502. static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
  503. {
  504. u16 acc;
  505. u64 accval;
  506. int owned = 0;
  507. u64 oct_sel = 0;
  508. switch (sdnum) {
  509. case IB_7220_SERDES:
  510. /*
  511. * The IB SERDES "ownership" is fairly simple. A single each
  512. * request/grant.
  513. */
  514. acc = kr_ibsd_epb_access_ctrl;
  515. break;
  516. case PCIE_SERDES0:
  517. case PCIE_SERDES1:
  518. /* PCIe SERDES has two "octants", need to select which */
  519. acc = kr_pciesd_epb_access_ctrl;
  520. oct_sel = (2 << (sdnum - PCIE_SERDES0));
  521. break;
  522. default:
  523. return 0;
  524. }
  525. /* Make sure any outstanding transaction was seen */
  526. qib_read_kreg32(dd, kr_scratch);
  527. udelay(15);
  528. accval = qib_read_kreg32(dd, acc);
  529. owned = !!(accval & EPB_ACC_GNT);
  530. if (claim < 0) {
  531. /* Need to release */
  532. u64 pollval;
  533. /*
  534. * The only writeable bits are the request and CS.
  535. * Both should be clear
  536. */
  537. u64 newval = 0;
  538. qib_write_kreg(dd, acc, newval);
  539. /* First read after write is not trustworthy */
  540. pollval = qib_read_kreg32(dd, acc);
  541. udelay(5);
  542. pollval = qib_read_kreg32(dd, acc);
  543. if (pollval & EPB_ACC_GNT)
  544. owned = -1;
  545. } else if (claim > 0) {
  546. /* Need to claim */
  547. u64 pollval;
  548. u64 newval = EPB_ACC_REQ | oct_sel;
  549. qib_write_kreg(dd, acc, newval);
  550. /* First read after write is not trustworthy */
  551. pollval = qib_read_kreg32(dd, acc);
  552. udelay(5);
  553. pollval = qib_read_kreg32(dd, acc);
  554. if (!(pollval & EPB_ACC_GNT))
  555. owned = -1;
  556. }
  557. return owned;
  558. }
  559. /*
  560. * Lemma to deal with race condition of write..read to epb regs
  561. */
  562. static int epb_trans(struct qib_devdata *dd, u16 reg, u64 i_val, u64 *o_vp)
  563. {
  564. int tries;
  565. u64 transval;
  566. qib_write_kreg(dd, reg, i_val);
  567. /* Throw away first read, as RDY bit may be stale */
  568. transval = qib_read_kreg64(dd, reg);
  569. for (tries = EPB_TRANS_TRIES; tries; --tries) {
  570. transval = qib_read_kreg32(dd, reg);
  571. if (transval & EPB_TRANS_RDY)
  572. break;
  573. udelay(5);
  574. }
  575. if (transval & EPB_TRANS_ERR)
  576. return -1;
  577. if (tries > 0 && o_vp)
  578. *o_vp = transval;
  579. return tries;
  580. }
/**
 * qib_sd7220_reg_mod - modify SERDES register
 * @dd: the qlogic_ib device
 * @sdnum: which SERDES to access
 * @loc: location - channel, element, register, as packed by EPB_LOC() macro.
 * @wd: Write Data - value to set in register
 * @mask: ones where data should be spliced into reg.
 *
 * Basic register read/modify/write, with un-needed accesses elided. That is,
 * a mask of zero will prevent write, while a mask of 0xFF will prevent read.
 * returns current (presumed, if a write was done) contents of selected
 * register, or <0 if errors.
 */
static int qib_sd7220_reg_mod(struct qib_devdata *dd, int sdnum, u32 loc,
			      u32 wd, u32 mask)
{
	u16 trans;
	u64 transval;
	int owned;
	int tries, ret;
	unsigned long flags;

	/* pick the EPB transaction register for the requested SerDes */
	switch (sdnum) {
	case IB_7220_SERDES:
		trans = kr_ibsd_epb_transaction_reg;
		break;
	case PCIE_SERDES0:
	case PCIE_SERDES1:
		trans = kr_pciesd_epb_transaction_reg;
		break;
	default:
		return -1;
	}
	/*
	 * All access is locked in software (vs other host threads) and
	 * hardware (vs uC access).
	 */
	spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
	owned = epb_access(dd, sdnum, 1);
	if (owned < 0) {
		spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
		return -1;
	}
	ret = 0;
	/* wait for any prior transaction to complete (RDY set) */
	for (tries = EPB_TRANS_TRIES; tries; --tries) {
		transval = qib_read_kreg32(dd, trans);
		if (transval & EPB_TRANS_RDY)
			break;
		udelay(5);
	}
	if (tries > 0) {
		tries = 1; /* to make read-skip work */
		if (mask != 0xFF) {
			/*
			 * Not a pure write, so need to read.
			 * loc encodes chip-select as well as address
			 */
			transval = loc | EPB_RD;
			tries = epb_trans(dd, trans, transval, &transval);
		}
		if (tries > 0 && mask != 0) {
			/*
			 * Not a pure read, so need to write.
			 */
			wd = (wd & mask) | (transval & ~mask);
			transval = loc | (wd & EPB_DATA_MASK);
			tries = epb_trans(dd, trans, transval, &transval);
		}
	}
	/* else, failed to see ready, what error-handling? */
	/*
	 * Release bus. Failure is an error.
	 */
	if (epb_access(dd, sdnum, -1) < 0)
		ret = -1;
	else
		ret = transval & EPB_DATA_MASK;
	spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
	/* any exhausted poll/transaction above overrides the data result */
	if (tries <= 0)
		ret = -1;
	return ret;
}
  662. #define EPB_ROM_R (2)
  663. #define EPB_ROM_W (1)
  664. /*
  665. * Below, all uC-related, use appropriate UC_CS, depending
  666. * on which SerDes is used.
  667. */
  668. #define EPB_UC_CTL EPB_LOC(6, 0, 0)
  669. #define EPB_MADDRL EPB_LOC(6, 0, 2)
  670. #define EPB_MADDRH EPB_LOC(6, 0, 3)
  671. #define EPB_ROMDATA EPB_LOC(6, 0, 4)
  672. #define EPB_RAMDATA EPB_LOC(6, 0, 5)
  673. /* Transfer date to/from uC Program RAM of IB or PCIe SerDes */
  674. static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
  675. u8 *buf, int cnt, int rd_notwr)
  676. {
  677. u16 trans;
  678. u64 transval;
  679. u64 csbit;
  680. int owned;
  681. int tries;
  682. int sofar;
  683. int addr;
  684. int ret;
  685. unsigned long flags;
  686. const char *op;
  687. /* Pick appropriate transaction reg and "Chip select" for this serdes */
  688. switch (sdnum) {
  689. case IB_7220_SERDES:
  690. csbit = 1ULL << EPB_IB_UC_CS_SHF;
  691. trans = kr_ibsd_epb_transaction_reg;
  692. break;
  693. case PCIE_SERDES0:
  694. case PCIE_SERDES1:
  695. /* PCIe SERDES has uC "chip select" in different bit, too */
  696. csbit = 1ULL << EPB_PCIE_UC_CS_SHF;
  697. trans = kr_pciesd_epb_transaction_reg;
  698. break;
  699. default:
  700. return -1;
  701. }
  702. op = rd_notwr ? "Rd" : "Wr";
  703. spin_lock_irqsave(&dd->cspec->sdepb_lock, flags);
  704. owned = epb_access(dd, sdnum, 1);
  705. if (owned < 0) {
  706. spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
  707. return -1;
  708. }
  709. /*
  710. * In future code, we may need to distinguish several address ranges,
  711. * and select various memories based on this. For now, just trim
  712. * "loc" (location including address and memory select) to
  713. * "addr" (address within memory). we will only support PRAM
  714. * The memory is 8KB.
  715. */
  716. addr = loc & 0x1FFF;
  717. for (tries = EPB_TRANS_TRIES; tries; --tries) {
  718. transval = qib_read_kreg32(dd, trans);
  719. if (transval & EPB_TRANS_RDY)
  720. break;
  721. udelay(5);
  722. }
  723. sofar = 0;
  724. if (tries > 0) {
  725. /*
  726. * Every "memory" access is doubly-indirect.
  727. * We set two bytes of address, then read/write
  728. * one or mores bytes of data.
  729. */
  730. /* First, we set control to "Read" or "Write" */
  731. transval = csbit | EPB_UC_CTL |
  732. (rd_notwr ? EPB_ROM_R : EPB_ROM_W);
  733. tries = epb_trans(dd, trans, transval, &transval);
  734. while (tries > 0 && sofar < cnt) {
  735. if (!sofar) {
  736. /* Only set address at start of chunk */
  737. int addrbyte = (addr + sofar) >> 8;
  738. transval = csbit | EPB_MADDRH | addrbyte;
  739. tries = epb_trans(dd, trans, transval,
  740. &transval);
  741. if (tries <= 0)
  742. break;
  743. addrbyte = (addr + sofar) & 0xFF;
  744. transval = csbit | EPB_MADDRL | addrbyte;
  745. tries = epb_trans(dd, trans, transval,
  746. &transval);
  747. if (tries <= 0)
  748. break;
  749. }
  750. if (rd_notwr)
  751. transval = csbit | EPB_ROMDATA | EPB_RD;
  752. else
  753. transval = csbit | EPB_ROMDATA | buf[sofar];
  754. tries = epb_trans(dd, trans, transval, &transval);
  755. if (tries <= 0)
  756. break;
  757. if (rd_notwr)
  758. buf[sofar] = transval & EPB_DATA_MASK;
  759. ++sofar;
  760. }
  761. /* Finally, clear control-bit for Read or Write */
  762. transval = csbit | EPB_UC_CTL;
  763. tries = epb_trans(dd, trans, transval, &transval);
  764. }
  765. ret = sofar;
  766. /* Release bus. Failure is an error */
  767. if (epb_access(dd, sdnum, -1) < 0)
  768. ret = -1;
  769. spin_unlock_irqrestore(&dd->cspec->sdepb_lock, flags);
  770. if (tries <= 0)
  771. ret = -1;
  772. return ret;
  773. }
  774. #define PROG_CHUNK 64
  775. static int qib_sd7220_prog_ld(struct qib_devdata *dd, int sdnum,
  776. const u8 *img, int len, int offset)
  777. {
  778. int cnt, sofar, req;
  779. sofar = 0;
  780. while (sofar < len) {
  781. req = len - sofar;
  782. if (req > PROG_CHUNK)
  783. req = PROG_CHUNK;
  784. cnt = qib_sd7220_ram_xfer(dd, sdnum, offset + sofar,
  785. (u8 *)img + sofar, req, 0);
  786. if (cnt < req) {
  787. sofar = -1;
  788. break;
  789. }
  790. sofar += req;
  791. }
  792. return sofar;
  793. }
  794. #define VFY_CHUNK 64
  795. #define SD_PRAM_ERROR_LIMIT 42
  796. static int qib_sd7220_prog_vfy(struct qib_devdata *dd, int sdnum,
  797. const u8 *img, int len, int offset)
  798. {
  799. int cnt, sofar, req, idx, errors;
  800. unsigned char readback[VFY_CHUNK];
  801. errors = 0;
  802. sofar = 0;
  803. while (sofar < len) {
  804. req = len - sofar;
  805. if (req > VFY_CHUNK)
  806. req = VFY_CHUNK;
  807. cnt = qib_sd7220_ram_xfer(dd, sdnum, sofar + offset,
  808. readback, req, 1);
  809. if (cnt < req) {
  810. /* failed in read itself */
  811. sofar = -1;
  812. break;
  813. }
  814. for (idx = 0; idx < cnt; ++idx) {
  815. if (readback[idx] != img[idx+sofar])
  816. ++errors;
  817. }
  818. sofar += cnt;
  819. }
  820. return errors ? -errors : sofar;
  821. }
  822. static int
  823. qib_sd7220_ib_load(struct qib_devdata *dd, const struct firmware *fw)
  824. {
  825. return qib_sd7220_prog_ld(dd, IB_7220_SERDES, fw->data, fw->size, 0);
  826. }
  827. static int
  828. qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
  829. {
  830. return qib_sd7220_prog_vfy(dd, IB_7220_SERDES, fw->data, fw->size, 0);
  831. }
  832. /*
  833. * IRQ not set up at this point in init, so we poll.
  834. */
  835. #define IB_SERDES_TRIM_DONE (1ULL << 11)
  836. #define TRIM_TMO (30)
  837. static int qib_sd_trimdone_poll(struct qib_devdata *dd)
  838. {
  839. int trim_tmo, ret;
  840. uint64_t val;
  841. /*
  842. * Default to failure, so IBC will not start
  843. * without IB_SERDES_TRIM_DONE.
  844. */
  845. ret = 0;
  846. for (trim_tmo = 0; trim_tmo < TRIM_TMO; ++trim_tmo) {
  847. val = qib_read_kreg64(dd, kr_ibcstatus);
  848. if (val & IB_SERDES_TRIM_DONE) {
  849. ret = 1;
  850. break;
  851. }
  852. msleep(10);
  853. }
  854. if (trim_tmo >= TRIM_TMO) {
  855. qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
  856. ret = 0;
  857. }
  858. return ret;
  859. }
#define TX_FAST_ELT (9)

/*
 * Set the "negotiation" values for SERDES. These are used by the IB1.2
 * link negotiation. Macros below are attempt to keep the values a
 * little more human-editable.
 * First, values related to Drive De-emphasis Settings.
 */

#define NUM_DDS_REGS 6
#define DDS_REG_MAP 0x76A910 /* LSB-first list of regs (in elt 9) to mod */

/*
 * Pack one row of drive/de-emphasis settings into the NUM_DDS_REGS
 * register bytes of element 9. The _d arguments are the DDR-rate
 * values, the _s arguments the SDR-rate values; amplitude, main tap,
 * post-cursor and pre-cursor fields are bit-packed per the vendor's
 * register layout (constants 1, 4, 0x21 set fixed enable bits —
 * presumably per vendor datasheet; not derivable from this file).
 */
#define DDS_VAL(amp_d, main_d, ipst_d, ipre_d, amp_s, main_s, ipst_s, ipre_s) \
	{ { ((amp_d & 0x1F) << 1) | 1, ((amp_s & 0x1F) << 1) | 1, \
	  (main_d << 3) | 4 | (ipre_d >> 2), \
	  (main_s << 3) | 4 | (ipre_s >> 2), \
	  ((ipst_d & 0xF) << 1) | ((ipre_d & 3) << 6) | 0x21, \
	  ((ipst_s & 0xF) << 1) | ((ipre_s & 3) << 6) | 0x21 } }

/* Vendor-supplied table of DDS settings, one row per cable profile. */
static struct dds_init {
	uint8_t reg_vals[NUM_DDS_REGS];
} dds_init_vals[] = {
	/*	 DDR(FDR)	SDR(HDR)   */
	/* Vendor recommends below for 3m cable */
#define DDS_3M 0
	DDS_VAL(31, 19, 12, 0, 29, 22,  9, 0),
	DDS_VAL(31, 12, 15, 4, 31, 15, 15, 1),
	DDS_VAL(31, 13, 15, 3, 31, 16, 15, 0),
	DDS_VAL(31, 14, 15, 2, 31, 17, 14, 0),
	DDS_VAL(31, 15, 15, 1, 31, 18, 13, 0),
	DDS_VAL(31, 16, 15, 0, 31, 19, 12, 0),
	DDS_VAL(31, 17, 14, 0, 31, 20, 11, 0),
	DDS_VAL(31, 18, 13, 0, 30, 21, 10, 0),
	DDS_VAL(31, 20, 11, 0, 28, 23,  8, 0),
	DDS_VAL(31, 21, 10, 0, 27, 24,  7, 0),
	DDS_VAL(31, 22,  9, 0, 26, 25,  6, 0),
	DDS_VAL(30, 23,  8, 0, 25, 26,  5, 0),
	DDS_VAL(29, 24,  7, 0, 23, 27,  4, 0),
	/* Vendor recommends below for 1m cable */
#define DDS_1M 13
	DDS_VAL(28, 25,  6, 0, 21, 28,  3, 0),
	DDS_VAL(27, 26,  5, 0, 19, 29,  2, 0),
	DDS_VAL(25, 27,  4, 0, 17, 30,  1, 0)
};
/*
 * Now the RXEQ section of the table.
 */
/* Hardware packs an element number and register address thus: */
#define RXEQ_INIT_RDESC(elt, addr) (((elt) & 0xF) | ((addr) << 4))
/* One row: packed elt/addr descriptor plus the four per-set values. */
#define RXEQ_VAL(elt, adr, val0, val1, val2, val3) \
	{RXEQ_INIT_RDESC((elt), (adr)), {(val0), (val1), (val2), (val3)} }
/* Same as RXEQ_VAL but with one value replicated across all four sets. */
#define RXEQ_VAL_ALL(elt, adr, val)	\
	{RXEQ_INIT_RDESC((elt), (adr)), {(val), (val), (val), (val)} }
/* NOTE(review): the four RXEQ_SDR_* constants are not referenced in
 * this part of the file — possibly historical; confirm before removal.
 */
#define RXEQ_SDR_DFELTH 0
#define RXEQ_SDR_TLTH 0
#define RXEQ_SDR_G1CNT_Z1CNT 0x11
#define RXEQ_SDR_ZCNT 23

static struct rxeq_init {
	u16 rdesc;	/* in form used in SerDesDDSRXEQ */
	u8  rdata[4];
} rxeq_init_vals[] = {
	/* Set Rcv Eq. to Preset node */
	RXEQ_VAL_ALL(7, 0x27, 0x10),
	/* Set DFELTHFDR/HDR thresholds */
	RXEQ_VAL(7, 8,    0, 0, 0, 0), /* FDR, was 0, 1, 2, 3 */
	RXEQ_VAL(7, 0x21, 0, 0, 0, 0), /* HDR */
	/* Set TLTHFDR/HDR theshold */
	RXEQ_VAL(7, 9,    2, 2, 2, 2), /* FDR, was 0, 2, 4, 6 */
	RXEQ_VAL(7, 0x23, 2, 2, 2, 2), /* HDR, was  0, 1, 2, 3 */
	/* Set Preamp setting 2 (ZFR/ZCNT) */
	RXEQ_VAL(7, 0x1B, 12, 12, 12, 12), /* FDR, was 12, 16, 20, 24 */
	RXEQ_VAL(7, 0x1C, 12, 12, 12, 12), /* HDR, was 12, 16, 20, 24 */
	/* Set Preamp DC gain and Setting 1 (GFR/GHR) */
	RXEQ_VAL(7, 0x1E, 16, 16, 16, 16), /* FDR, was 16, 17, 18, 20 */
	RXEQ_VAL(7, 0x1F, 16, 16, 16, 16), /* HDR, was 16, 17, 18, 20 */
	/* Toggle RELOCK (in VCDL_CTRL0) to lock to data */
	RXEQ_VAL_ALL(6, 6, 0x20), /* Set D5 High */
	RXEQ_VAL_ALL(6, 6, 0), /* Set D5 Low */
};

/* There are 17 values from vendor, but IBC only accesses the first 16 */
#define DDS_ROWS (16)
#define RXEQ_ROWS ARRAY_SIZE(rxeq_init_vals)
/*
 * Load the DDS and RXEQ negotiation tables into the chip's SerDes
 * map table, and program the table geometry into IBSerdesCtrl.
 * Writes go through the memory-mapped kregbase window; each MMIO
 * write is followed by mmiowb() and a scratch-register read —
 * presumably to flush/order the posted write before the next one.
 * Always returns 0.
 */
static int qib_sd_setvals(struct qib_devdata *dd)
{
	int idx, midx;
	int min_idx;	 /* Minimum index for this portion of table */
	uint32_t dds_reg_map;
	u64 __iomem *taddr, *iaddr;
	uint64_t data;
	uint64_t sdctl;

	taddr = dd->kregbase + kr_serdes_maptable;
	iaddr = dd->kregbase + kr_serdes_ddsrxeq0;

	/*
	 * Init the DDS section of the table.
	 * Each "row" of the table provokes NUM_DDS_REG writes, to the
	 * registers indicated in DDS_REG_MAP.
	 */
	sdctl = qib_read_kreg64(dd, kr_ibserdesctrl);
	/* Field at bits 8..12 holds the DDS register count, 13..17 the
	 * RXEQ row count (5-bit fields, per the 0x1f masks). */
	sdctl = (sdctl & ~(0x1f << 8)) | (NUM_DDS_REGS << 8);
	sdctl = (sdctl & ~(0x1f << 13)) | (RXEQ_ROWS << 13);
	qib_write_kreg(dd, kr_ibserdesctrl, sdctl);

	/*
	 * Iterate down table within loop for each register to store.
	 */
	dds_reg_map = DDS_REG_MAP;
	for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
		/* Descriptor: target register (next nibble of the map)
		 * in the high nibble, element number in the low. */
		data = ((dds_reg_map & 0xF) << 4) | TX_FAST_ELT;
		writeq(data, iaddr + idx);
		mmiowb();
		qib_read_kreg32(dd, kr_scratch);
		dds_reg_map >>= 4;
		for (midx = 0; midx < DDS_ROWS; ++midx) {
			/* Table is laid out 16 entries per register slot. */
			u64 __iomem *daddr = taddr + ((midx << 4) + idx);

			data = dds_init_vals[midx].reg_vals[idx];
			writeq(data, daddr);
			mmiowb();
			qib_read_kreg32(dd, kr_scratch);
		} /* End inner for (vals for this reg, each row) */
	} /* end outer for (regs to be stored) */

	/*
	 * Init the RXEQ section of the table.
	 * This runs in a different order, as the pattern of
	 * register references is more complex, but there are only
	 * four "data" values per register.
	 */
	min_idx = idx; /* RXEQ indices pick up where DDS left off */
	taddr += 0x100; /* RXEQ data is in second half of table */
	/* Iterate through RXEQ register addresses */
	for (idx = 0; idx < RXEQ_ROWS; ++idx) {
		int didx; /* "destination" */
		int vidx;

		/* didx is offset by min_idx to address RXEQ range of regs */
		didx = idx + min_idx;
		/* Store the next RXEQ register address */
		writeq(rxeq_init_vals[idx].rdesc, iaddr + didx);
		mmiowb();
		qib_read_kreg32(dd, kr_scratch);
		/* Iterate through RXEQ values */
		for (vidx = 0; vidx < 4; vidx++) {
			data = rxeq_init_vals[idx].rdata[vidx];
			writeq(data, taddr + (vidx << 6) + idx);
			mmiowb();
			qib_read_kreg32(dd, kr_scratch);
		}
	} /* end outer for (Reg-writes for RXEQ) */
	return 0;
}
/* EPB register locations used by the routines below. */
#define CMUCTRL5 EPB_LOC(7, 0, 0x15)
#define RXHSCTRL0(chan) EPB_LOC(chan, 6, 0)
#define VCDL_DAC2(chan) EPB_LOC(chan, 6, 5)
#define VCDL_CTRL0(chan) EPB_LOC(chan, 6, 6)
#define VCDL_CTRL2(chan) EPB_LOC(chan, 6, 8)
#define START_EQ2(chan) EPB_LOC(chan, 7, 0x28)

/*
 * Repeat a "store" across all channels of the IB SerDes.
 * Although nominally it inherits the "read value" of the last
 * channel it modified, the only really useful return is <0 for
 * failure, >= 0 for success. The parameter 'loc' is assumed to
 * be the location in some channel of the register to be modified
 * The caller can specify use of the "gang write" option of EPB,
 * in which case we use the specified channel data for any fields
 * not explicitely written.
 */
static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
	int mask)
{
	int ret = -1;
	int chnl;

	if (loc & EPB_GLOBAL_WR) {
		/*
		 * Our caller has assured us that we can set all four
		 * channels at once. Trust that. If mask is not 0xFF,
		 * we will read the _specified_ channel for our starting
		 * value.
		 */
		loc |= (1U << EPB_IB_QUAD0_CS_SHF);
		/* Channel number lives in bits 4..6 above the address. */
		chnl = (loc >> (4 + EPB_ADDR_SHF)) & 7;
		if (mask != 0xFF) {
			/* Partial write: read-modify-write from the
			 * specified channel (without the gang-write bit). */
			ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
						 loc & ~EPB_GLOBAL_WR, 0, 0);
			if (ret < 0) {
				int sloc = loc >> EPB_ADDR_SHF;

				qib_dev_err(dd, "pre-read failed: elt %d,"
					    " addr 0x%X, chnl %d\n",
					    (sloc & 0xF),
					    (sloc >> 9) & 0x3f, chnl);
				return ret;
			}
			/* Merge new bits into the value just read. */
			val = (ret & ~mask) | (val & mask);
		}
		/* Clear channel field; gang write covers all channels. */
		loc &= ~(7 << (4+EPB_ADDR_SHF));
		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
		if (ret < 0) {
			int sloc = loc >> EPB_ADDR_SHF;

			qib_dev_err(dd, "Global WR failed: elt %d,"
				    " addr 0x%X, val %02X\n",
				    (sloc & 0xF), (sloc >> 9) & 0x3f, val);
		}
		return ret;
	}
	/* Clear "channel" and set CS so we can simply iterate */
	loc &= ~(7 << (4+EPB_ADDR_SHF));
	loc |= (1U << EPB_IB_QUAD0_CS_SHF);
	for (chnl = 0; chnl < 4; ++chnl) {
		int cloc = loc | (chnl << (4+EPB_ADDR_SHF));

		ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, cloc, val, mask);
		if (ret < 0) {
			int sloc = loc >> EPB_ADDR_SHF;

			qib_dev_err(dd, "Write failed: elt %d,"
				    " addr 0x%X, chnl %d, val 0x%02X,"
				    " mask 0x%02X\n",
				    (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
				    val & 0xFF, mask & 0xFF);
			break;
		}
	}
	return ret;
}
  1074. /*
  1075. * Set the Tx values normally modified by IBC in IB1.2 mode to default
  1076. * values, as gotten from first row of init table.
  1077. */
  1078. static int set_dds_vals(struct qib_devdata *dd, struct dds_init *ddi)
  1079. {
  1080. int ret;
  1081. int idx, reg, data;
  1082. uint32_t regmap;
  1083. regmap = DDS_REG_MAP;
  1084. for (idx = 0; idx < NUM_DDS_REGS; ++idx) {
  1085. reg = (regmap & 0xF);
  1086. regmap >>= 4;
  1087. data = ddi->reg_vals[idx];
  1088. /* Vendor says RMW not needed for these regs, use 0xFF mask */
  1089. ret = ibsd_mod_allchnls(dd, EPB_LOC(0, 9, reg), data, 0xFF);
  1090. if (ret < 0)
  1091. break;
  1092. }
  1093. return ret;
  1094. }
  1095. /*
  1096. * Set the Rx values normally modified by IBC in IB1.2 mode to default
  1097. * values, as gotten from selected column of init table.
  1098. */
  1099. static int set_rxeq_vals(struct qib_devdata *dd, int vsel)
  1100. {
  1101. int ret;
  1102. int ridx;
  1103. int cnt = ARRAY_SIZE(rxeq_init_vals);
  1104. for (ridx = 0; ridx < cnt; ++ridx) {
  1105. int elt, reg, val, loc;
  1106. elt = rxeq_init_vals[ridx].rdesc & 0xF;
  1107. reg = rxeq_init_vals[ridx].rdesc >> 4;
  1108. loc = EPB_LOC(0, elt, reg);
  1109. val = rxeq_init_vals[ridx].rdata[vsel];
  1110. /* mask of 0xFF, because hardware does full-byte store. */
  1111. ret = ibsd_mod_allchnls(dd, loc, val, 0xFF);
  1112. if (ret < 0)
  1113. break;
  1114. }
  1115. return ret;
  1116. }
/*
 * Set the default values (row 0) for DDR Driver De-emphasis.
 * we do this initially and whenever we turn off IB-1.2
 *
 * The "default" values for Rx equalization are also stored to
 * SerDes registers. Formerly (and still default), we used set 2.
 * For experimenting with cables and link-partners, we allow changing
 * that via a module parameter.
 */
static unsigned qib_rxeq_set = 2;
/* Writable by root at runtime; only the low 2 bits are used (set & 3). */
module_param_named(rxeq_default_set, qib_rxeq_set, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(rxeq_default_set,
		 "Which set [0..3] of Rx Equalization values is default");
  1131. static int qib_internal_presets(struct qib_devdata *dd)
  1132. {
  1133. int ret = 0;
  1134. ret = set_dds_vals(dd, dds_init_vals + DDS_3M);
  1135. if (ret < 0)
  1136. qib_dev_err(dd, "Failed to set default DDS values\n");
  1137. ret = set_rxeq_vals(dd, qib_rxeq_set & 3);
  1138. if (ret < 0)
  1139. qib_dev_err(dd, "Failed to set default RXEQ values\n");
  1140. return ret;
  1141. }
  1142. int qib_sd7220_presets(struct qib_devdata *dd)
  1143. {
  1144. int ret = 0;
  1145. if (!dd->cspec->presets_needed)
  1146. return ret;
  1147. dd->cspec->presets_needed = 0;
  1148. /* Assert uC reset, so we don't clash with it. */
  1149. qib_ibsd_reset(dd, 1);
  1150. udelay(2);
  1151. qib_sd_trimdone_monitor(dd, "link-down");
  1152. ret = qib_internal_presets(dd);
  1153. return ret;
  1154. }
  1155. static int qib_sd_trimself(struct qib_devdata *dd, int val)
  1156. {
  1157. int loc = CMUCTRL5 | (1U << EPB_IB_QUAD0_CS_SHF);
  1158. return qib_sd7220_reg_mod(dd, IB_7220_SERDES, loc, val, 0xFF);
  1159. }
  1160. static int qib_sd_early(struct qib_devdata *dd)
  1161. {
  1162. int ret;
  1163. ret = ibsd_mod_allchnls(dd, RXHSCTRL0(0) | EPB_GLOBAL_WR, 0xD4, 0xFF);
  1164. if (ret < 0)
  1165. goto bail;
  1166. ret = ibsd_mod_allchnls(dd, START_EQ1(0) | EPB_GLOBAL_WR, 0x10, 0xFF);
  1167. if (ret < 0)
  1168. goto bail;
  1169. ret = ibsd_mod_allchnls(dd, START_EQ2(0) | EPB_GLOBAL_WR, 0x30, 0xFF);
  1170. bail:
  1171. return ret;
  1172. }
  1173. #define BACTRL(chnl) EPB_LOC(chnl, 6, 0x0E)
  1174. #define LDOUTCTRL1(chnl) EPB_LOC(chnl, 7, 6)
  1175. #define RXHSSTATUS(chnl) EPB_LOC(chnl, 6, 0xF)
  1176. static int qib_sd_dactrim(struct qib_devdata *dd)
  1177. {
  1178. int ret;
  1179. ret = ibsd_mod_allchnls(dd, VCDL_DAC2(0) | EPB_GLOBAL_WR, 0x2D, 0xFF);
  1180. if (ret < 0)
  1181. goto bail;
  1182. /* more fine-tuning of what will be default */
  1183. ret = ibsd_mod_allchnls(dd, VCDL_CTRL2(0), 3, 0xF);
  1184. if (ret < 0)
  1185. goto bail;
  1186. ret = ibsd_mod_allchnls(dd, BACTRL(0) | EPB_GLOBAL_WR, 0x40, 0xFF);
  1187. if (ret < 0)
  1188. goto bail;
  1189. ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
  1190. if (ret < 0)
  1191. goto bail;
  1192. ret = ibsd_mod_allchnls(dd, RXHSSTATUS(0) | EPB_GLOBAL_WR, 0x04, 0xFF);
  1193. if (ret < 0)
  1194. goto bail;
  1195. /*
  1196. * Delay for max possible number of steps, with slop.
  1197. * Each step is about 4usec.
  1198. */
  1199. udelay(415);
  1200. ret = ibsd_mod_allchnls(dd, LDOUTCTRL1(0) | EPB_GLOBAL_WR, 0x00, 0xFF);
  1201. bail:
  1202. return ret;
  1203. }
  1204. #define RELOCK_FIRST_MS 3
  1205. #define RXLSPPM(chan) EPB_LOC(chan, 0, 2)
  1206. void toggle_7220_rclkrls(struct qib_devdata *dd)
  1207. {
  1208. int loc = RXLSPPM(0) | EPB_GLOBAL_WR;
  1209. int ret;
  1210. ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
  1211. if (ret < 0)
  1212. qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
  1213. else {
  1214. udelay(1);
  1215. ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
  1216. }
  1217. /* And again for good measure */
  1218. udelay(1);
  1219. ret = ibsd_mod_allchnls(dd, loc, 0, 0x80);
  1220. if (ret < 0)
  1221. qib_dev_err(dd, "RCLKRLS failed to clear D7\n");
  1222. else {
  1223. udelay(1);
  1224. ibsd_mod_allchnls(dd, loc, 0x80, 0x80);
  1225. }
  1226. /* Now reset xgxs and IBC to complete the recovery */
  1227. dd->f_xgxs_reset(dd->pport);
  1228. }
  1229. /*
  1230. * Shut down the timer that polls for relock occasions, if needed
  1231. * this is "hooked" from qib_7220_quiet_serdes(), which is called
  1232. * just before qib_shutdown_device() in qib_driver.c shuts down all
  1233. * the other timers
  1234. */
  1235. void shutdown_7220_relock_poll(struct qib_devdata *dd)
  1236. {
  1237. if (dd->cspec->relock_timer_active)
  1238. del_timer_sync(&dd->cspec->relock_timer);
  1239. }
/* Enable (default) periodic relock attempts while the link is down;
 * writable by root at runtime. */
static unsigned qib_relock_by_timer = 1;
module_param_named(relock_by_timer, qib_relock_by_timer, uint,
		   S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(relock_by_timer, "Allow relock attempt if link not up");
  1244. static void qib_run_relock(unsigned long opaque)
  1245. {
  1246. struct qib_devdata *dd = (struct qib_devdata *)opaque;
  1247. struct qib_pportdata *ppd = dd->pport;
  1248. struct qib_chip_specific *cs = dd->cspec;
  1249. int timeoff;
  1250. /*
  1251. * Check link-training state for "stuck" state, when down.
  1252. * if found, try relock and schedule another try at
  1253. * exponentially growing delay, maxed at one second.
  1254. * if not stuck, our work is done.
  1255. */
  1256. if ((dd->flags & QIB_INITTED) && !(ppd->lflags &
  1257. (QIBL_IB_AUTONEG_INPROG | QIBL_LINKINIT | QIBL_LINKARMED |
  1258. QIBL_LINKACTIVE))) {
  1259. if (qib_relock_by_timer) {
  1260. if (!(ppd->lflags & QIBL_IB_LINK_DISABLED))
  1261. toggle_7220_rclkrls(dd);
  1262. }
  1263. /* re-set timer for next check */
  1264. timeoff = cs->relock_interval << 1;
  1265. if (timeoff > HZ)
  1266. timeoff = HZ;
  1267. cs->relock_interval = timeoff;
  1268. } else
  1269. timeoff = HZ;
  1270. mod_timer(&cs->relock_timer, jiffies + timeoff);
  1271. }
  1272. void set_7220_relock_poll(struct qib_devdata *dd, int ibup)
  1273. {
  1274. struct qib_chip_specific *cs = dd->cspec;
  1275. if (ibup) {
  1276. /* We are now up, relax timer to 1 second interval */
  1277. if (cs->relock_timer_active) {
  1278. cs->relock_interval = HZ;
  1279. mod_timer(&cs->relock_timer, jiffies + HZ);
  1280. }
  1281. } else {
  1282. /* Transition to down, (re-)set timer to short interval. */
  1283. unsigned int timeout;
  1284. timeout = msecs_to_jiffies(RELOCK_FIRST_MS);
  1285. if (timeout == 0)
  1286. timeout = 1;
  1287. /* If timer has not yet been started, do so. */
  1288. if (!cs->relock_timer_active) {
  1289. cs->relock_timer_active = 1;
  1290. init_timer(&cs->relock_timer);
  1291. cs->relock_timer.function = qib_run_relock;
  1292. cs->relock_timer.data = (unsigned long) dd;
  1293. cs->relock_interval = timeout;
  1294. cs->relock_timer.expires = jiffies + timeout;
  1295. add_timer(&cs->relock_timer);
  1296. } else {
  1297. cs->relock_interval = timeout;
  1298. mod_timer(&cs->relock_timer, jiffies + timeout);
  1299. }
  1300. }
  1301. }