aiutils.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013
  1. /*
  2. * Misc utility routines for accessing chip-specific features
  3. * of the SiliconBackplane-based Broadcom chips.
  4. *
  5. * Copyright (C) 1999-2015, Broadcom Corporation
  6. *
  7. * Unless you and Broadcom execute a separate written software license
  8. * agreement governing use of this software, this software is licensed to you
  9. * under the terms of the GNU General Public License version 2 (the "GPL"),
  10. * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  11. * following added to such license:
  12. *
  13. * As a special exception, the copyright holders of this software give you
  14. * permission to link this software with independent modules, and to copy and
  15. * distribute the resulting executable under terms of your choice, provided that
  16. * you also meet, for each linked independent module, the terms and conditions of
  17. * the license of that module. An independent module is a module which is not
  18. * derived from this software. The special exception does not apply to any
  19. * modifications of the software.
  20. *
  21. * Notwithstanding the above, under no circumstances may you combine this
  22. * software in any way with any other Broadcom software provided under a license
  23. * other than the GPL, without Broadcom's express prior written consent.
  24. *
  25. * $Id: aiutils.c 531050 2015-02-02 07:21:19Z $
  26. */
  27. #include <bcm_cfg.h>
  28. #include <typedefs.h>
  29. #include <bcmdefs.h>
  30. #include <osl.h>
  31. #include <bcmutils.h>
  32. #include <siutils.h>
  33. #include <hndsoc.h>
  34. #include <sbchipc.h>
  35. #include <pcicfg.h>
  36. #include "siutils_priv.h"
/* Chip-specific DMP quirk predicates.  These builds target none of the
 * affected chips, so the checks compile to constant false and the guarded
 * fallback paths below are dead code kept for source compatibility.
 */
#define BCM47162_DMP() (0)
#define BCM5357_DMP() (0)
#define BCM4707_DMP() (0)
/* No core id/revision remapping is required on these chips: identity map. */
#define remap_coreid(sih, coreid) (coreid)
#define remap_corerev(sih, corerev) (corerev)
  42. /* EROM parsing */
  43. static uint32
  44. get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
  45. {
  46. uint32 ent;
  47. uint inv = 0, nom = 0;
  48. uint32 size = 0;
  49. while (TRUE) {
  50. ent = R_REG(si_osh(sih), *eromptr);
  51. (*eromptr)++;
  52. if (mask == 0)
  53. break;
  54. if ((ent & ER_VALID) == 0) {
  55. inv++;
  56. continue;
  57. }
  58. if (ent == (ER_END | ER_VALID))
  59. break;
  60. if ((ent & mask) == match)
  61. break;
  62. /* escape condition related EROM size if it has invalid values */
  63. size += sizeof(*eromptr);
  64. if (size >= ER_SZ_MAX) {
  65. SI_ERROR(("Failed to find end of EROM marker\n"));
  66. break;
  67. }
  68. nom++;
  69. }
  70. SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
  71. if (inv + nom) {
  72. SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
  73. }
  74. return ent;
  75. }
  76. static uint32
  77. get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
  78. uint32 *sizel, uint32 *sizeh)
  79. {
  80. uint32 asd, sz, szd;
  81. asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
  82. if (((asd & ER_TAG1) != ER_ADD) ||
  83. (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
  84. ((asd & AD_ST_MASK) != st)) {
  85. /* This is not what we want, "push" it back */
  86. (*eromptr)--;
  87. return 0;
  88. }
  89. *addrl = asd & AD_ADDR_MASK;
  90. if (asd & AD_AG32)
  91. *addrh = get_erom_ent(sih, eromptr, 0, 0);
  92. else
  93. *addrh = 0;
  94. *sizeh = 0;
  95. sz = asd & AD_SZ_MASK;
  96. if (sz == AD_SZ_SZD) {
  97. szd = get_erom_ent(sih, eromptr, 0, 0);
  98. *sizel = szd & SD_SZ_MASK;
  99. if (szd & SD_SG32)
  100. *sizeh = get_erom_ent(sih, eromptr, 0, 0);
  101. } else
  102. *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
  103. SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
  104. sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
  105. return asd;
  106. }
/* Post-scan hardware fixup hook.  Intentionally empty in this build:
 * no AXI-specific fixups are applied after EROM enumeration.
 */
static void
ai_hwfixup(si_info_t *sii)
{
}
/* parse the enumeration rom to identify all cores */
/*
 * Walk the enumeration ROM (EROM) published by chipcommon and populate
 * cores_info (cia/cib words, core id, base addresses, sizes, wrapper
 * addresses) and sii->numcores for every core found.
 *
 * On any parse error sii->numcores is reset to 0 before returning, so a
 * failed scan leaves no partially-recorded cores.  'devid' is currently
 * unused by the function body.
 */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = (chipcregs_t *)regs;
	uint32 erombase, *eromptr, *eromlim;

	/* The EROM base address is read from chipcommon's eromptr register */
	erombase = R_REG(sii->osh, &cc->eromptr);

	/* Make the EROM table addressable for the bus we are on */
	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Set wrappers address */
		sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);

		/* Now point the window at the erom */
		OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
		eromptr = regs;
		break;

	case SPI_BUS:
	case SDIO_BUS:
		/* On SDIO/SPI the backplane address is used directly */
		eromptr = (uint32 *)(uintptr)erombase;
		break;

	case PCMCIA_BUS:
	default:
		SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
		ASSERT(0);
		return;
	}
	/* Hard upper bound on how far into the table we will read */
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
		regs, erombase, eromptr, eromlim));
	while (eromptr < eromlim) {
		uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
		uint32 mpd, asd, addrl, addrh, sizel, sizeh;
		uint i, j, idx;
		bool br;	/* TRUE once a bridge descriptor has been seen */

		br = FALSE;

		/* Grok a component: CIA word, then its paired CIB word */
		cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
		if (cia == (ER_END | ER_VALID)) {
			SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
			ai_hwfixup(sii);
			return;
		}

		cib = get_erom_ent(sih, &eromptr, 0, 0);

		if ((cib & ER_TAG) != ER_CI) {
			SI_ERROR(("CIA not followed by CIB\n"));
			goto error;
		}

		/* Decode component identity and port/wrapper counts */
		cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
		mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
		crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
		nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;	/* master wrappers */
		nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;	/* slave wrappers */
		nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;	/* master ports */
		nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;	/* slave ports */

#ifdef BCMDBG_SI
		SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
			"nsw = %d, nmp = %d & nsp = %d\n",
			mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
#else
		BCM_REFERENCE(crev);
#endif

		/* Skip ARM default components and anything without slave ports */
		if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
			continue;
		if ((nmw + nsw == 0)) {
			/* A component which is not a core */
			if (cid == OOB_ROUTER_CORE_ID) {
				/* Remember the OOB router's base address */
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
					&addrl, &addrh, &sizel, &sizeh);
				if (asd != 0) {
					sii->oob_router = addrl;
				}
			}
			/* Only these two wrapperless components are recorded as cores */
			if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID)
				continue;
		}

		idx = sii->numcores;

		cores_info->cia[idx] = cia;
		cores_info->cib[idx] = cib;
		cores_info->coreid[idx] = remap_coreid(sih, cid);

		/* Consume (and merely log) the master port descriptors */
		for (i = 0; i < nmp; i++) {
			mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
			if ((mpd & ER_TAG) != ER_MP) {
				SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
				goto error;
			}
			SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
				(mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
				(mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
		}

		/* First Slave Address Descriptor should be port 0:
		 * the main register space for the core
		 */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
		if (asd == 0) {
			/* Not a slave descriptor: consume bridge descriptors until
			 * they run out.
			 * NOTE(review): if neither a slave nor a bridge descriptor
			 * is found on the first pass, the sanity check below reads
			 * addrl/addrh/sizel/sizeh before get_asd() has written
			 * them — verify against the EROM layouts this ships on.
			 */
			do {
				/* Try again to see if it is a bridge */
				asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
					&sizel, &sizeh);
				if (asd != 0)
					br = TRUE;
				else {
					if (br == TRUE) {
						break;
					}
					else if ((addrh != 0) || (sizeh != 0) ||
						(sizel != SI_CORE_SIZE)) {
						SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
							"0x%x\n", addrh, sizeh, sizel));
						SI_ERROR(("First Slave ASD for"
							"core 0x%04x malformed "
							"(0x%08x)\n", cid, asd));
						goto error;
					}
				}
			} while (1);
		}
		cores_info->coresba[idx] = addrl;
		cores_info->coresba_size[idx] = sizel;
		/* Get any more ASDs in port 0; only the second 4KB space (j == 1)
		 * is recorded, in coresba2.
		 */
		j = 1;
		do {
			asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
				&sizel, &sizeh);
			if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
				cores_info->coresba2[idx] = addrl;
				cores_info->coresba2_size[idx] = sizel;
			}
			j++;
		} while (asd != 0);

		/* Go through the ASDs for other slave ports (consumed, not stored) */
		for (i = 1; i < nsp; i++) {
			j = 0;
			do {
				asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
					&sizel, &sizeh);
				if (asd == 0)
					break;
				j++;
			} while (1);
			if (j == 0) {
				SI_ERROR((" SP %d has no address descriptors\n", i));
				goto error;
			}
		}

		/* Now get master wrappers; only MW 0's address is stored */
		for (i = 0; i < nmw; i++) {
			asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
				&sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for MW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Master wrapper %d is not 4KB\n", i));
				goto error;
			}
			if (i == 0)
				cores_info->wrapba[idx] = addrl;
		}

		/* And finally slave wrappers; the first one is used as the
		 * wrapper address only when there are no master wrappers.
		 */
		for (i = 0; i < nsw; i++) {
			/* Slave wrappers start at port 1 unless there is only one SP */
			uint fwp = (nsp == 1) ? 0 : 1;
			asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
				&sizel, &sizeh);
			if (asd == 0) {
				SI_ERROR(("Missing descriptor for SW %d\n", i));
				goto error;
			}
			if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
				SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
				goto error;
			}
			if ((nmw == 0) && (i == 0))
				cores_info->wrapba[idx] = addrl;
		}

		/* Don't record bridges */
		if (br)
			continue;

		/* Done with core */
		sii->numcores++;
	}

	SI_ERROR(("Reached end of erom without finding END"));

error:
	/* Any failure invalidates the whole scan */
	sii->numcores = 0;
	return;
}
  301. /* This function changes the logical "focus" to the indicated core.
  302. * Return the current core's virtual address.
  303. */
  304. void *
  305. ai_setcoreidx(si_t *sih, uint coreidx)
  306. {
  307. si_info_t *sii = SI_INFO(sih);
  308. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  309. uint32 addr, wrap;
  310. void *regs;
  311. if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
  312. return (NULL);
  313. addr = cores_info->coresba[coreidx];
  314. wrap = cores_info->wrapba[coreidx];
  315. /*
  316. * If the user has provided an interrupt mask enabled function,
  317. * then assert interrupts are disabled before switching the core.
  318. */
  319. ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
  320. switch (BUSTYPE(sih->bustype)) {
  321. case SI_BUS:
  322. /* map new one */
  323. if (!cores_info->regs[coreidx]) {
  324. cores_info->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
  325. ASSERT(GOODREGS(cores_info->regs[coreidx]));
  326. }
  327. sii->curmap = regs = cores_info->regs[coreidx];
  328. if (!cores_info->wrappers[coreidx] && (wrap != 0)) {
  329. cores_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
  330. ASSERT(GOODREGS(cores_info->wrappers[coreidx]));
  331. }
  332. sii->curwrap = cores_info->wrappers[coreidx];
  333. break;
  334. case PCI_BUS:
  335. /* point bar0 window */
  336. OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr);
  337. regs = sii->curmap;
  338. /* point bar0 2nd 4KB window to the primary wrapper */
  339. if (PCIE_GEN2(sii))
  340. OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap);
  341. else
  342. OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap);
  343. break;
  344. case SPI_BUS:
  345. case SDIO_BUS:
  346. sii->curmap = regs = (void *)((uintptr)addr);
  347. sii->curwrap = (void *)((uintptr)wrap);
  348. break;
  349. case PCMCIA_BUS:
  350. default:
  351. ASSERT(0);
  352. regs = NULL;
  353. break;
  354. }
  355. sii->curmap = regs;
  356. sii->curidx = coreidx;
  357. return regs;
  358. }
/*
 * Look up address space 'asidx' of the current core by re-walking the
 * EROM.  Only address descriptors on slave ports 1..nsp-1 are counted
 * (all of port 0 is skipped); the asidx'th such descriptor's address and
 * size are written through *addr and *size.
 *
 * On failure (no chipcommon core found, or asidx out of range) only
 * *size is set to 0; *addr is left untouched — callers must check *size.
 */
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
	chipcregs_t *cc = NULL;
	uint32 erombase, *eromptr, *eromlim;
	uint i, j, cidx;
	uint32 cia, cib, nmp, nsp;
	uint32 asd, addrl, addrh, sizel, sizeh;

	/* Find chipcommon: it publishes the EROM base address */
	for (i = 0; i < sii->numcores; i++) {
		if (cores_info->coreid[i] == CC_CORE_ID) {
			cc = (chipcregs_t *)cores_info->regs[i];
			break;
		}
	}
	if (cc == NULL)
		goto error;

	erombase = R_REG(sii->osh, &cc->eromptr);
	eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
	eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));

	/* Identify the current core by its recorded CIA/CIB pair */
	cidx = sii->curidx;
	cia = cores_info->cia[cidx];
	cib = cores_info->cib[cidx];

	nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
	nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;

	/* scan for cores */
	while (eromptr < eromlim) {
		if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
			(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
			break;
		}
	}

	/* skip master ports */
	for (i = 0; i < nmp; i++)
		get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);

	/* Skip ASDs in port 0 */
	asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
	if (asd == 0) {
		/* Try again to see if it is a bridge */
		asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
			&sizel, &sizeh);
	}

	/* drain the remaining port-0 descriptors */
	j = 1;
	do {
		asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
			&sizel, &sizeh);
		j++;
	} while (asd != 0);

	/* Go through the ASDs for other slave ports */
	for (i = 1; i < nsp; i++) {
		j = 0;
		do {
			asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
				&sizel, &sizeh);
			if (asd == 0)
				break;
			/* count down to the requested space */
			if (!asidx--) {
				*addr = addrl;
				*size = sizel;
				return;
			}
			j++;
		} while (1);

		if (j == 0) {
			SI_ERROR((" SP %d has no address descriptors\n", i));
			break;
		}
	}

error:
	*size = 0;
	return;
}
  432. /* Return the number of address spaces in current core */
  433. int
  434. ai_numaddrspaces(si_t *sih)
  435. {
  436. return 2;
  437. }
  438. /* Return the address of the nth address space in the current core */
  439. uint32
  440. ai_addrspace(si_t *sih, uint asidx)
  441. {
  442. si_info_t *sii = SI_INFO(sih);
  443. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  444. uint cidx;
  445. cidx = sii->curidx;
  446. if (asidx == 0)
  447. return cores_info->coresba[cidx];
  448. else if (asidx == 1)
  449. return cores_info->coresba2[cidx];
  450. else {
  451. SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
  452. __FUNCTION__, asidx));
  453. return 0;
  454. }
  455. }
  456. /* Return the size of the nth address space in the current core */
  457. uint32
  458. ai_addrspacesize(si_t *sih, uint asidx)
  459. {
  460. si_info_t *sii = SI_INFO(sih);
  461. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  462. uint cidx;
  463. cidx = sii->curidx;
  464. if (asidx == 0)
  465. return cores_info->coresba_size[cidx];
  466. else if (asidx == 1)
  467. return cores_info->coresba2_size[cidx];
  468. else {
  469. SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
  470. __FUNCTION__, asidx));
  471. return 0;
  472. }
  473. }
  474. uint
  475. ai_flag(si_t *sih)
  476. {
  477. si_info_t *sii = SI_INFO(sih);
  478. aidmp_t *ai;
  479. if (BCM47162_DMP()) {
  480. SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
  481. return sii->curidx;
  482. }
  483. if (BCM5357_DMP()) {
  484. SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
  485. return sii->curidx;
  486. }
  487. if (BCM4707_DMP()) {
  488. SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
  489. __FUNCTION__));
  490. return sii->curidx;
  491. }
  492. ai = sii->curwrap;
  493. return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
  494. }
  495. uint
  496. ai_flag_alt(si_t *sih)
  497. {
  498. si_info_t *sii = SI_INFO(sih);
  499. aidmp_t *ai;
  500. if (BCM47162_DMP()) {
  501. SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
  502. return sii->curidx;
  503. }
  504. if (BCM5357_DMP()) {
  505. SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
  506. return sii->curidx;
  507. }
  508. if (BCM4707_DMP()) {
  509. SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
  510. __FUNCTION__));
  511. return sii->curidx;
  512. }
  513. ai = sii->curwrap;
  514. return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
  515. }
/* Set the interrupt flag for the current core.  Intentionally a no-op
 * here; the siutils interface requires the entry point.
 */
void
ai_setint(si_t *sih, int siflag)
{
}
  520. uint
  521. ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
  522. {
  523. si_info_t *sii = SI_INFO(sih);
  524. uint32 *map = (uint32 *) sii->curwrap;
  525. if (mask || val) {
  526. uint32 w = R_REG(sii->osh, map+(offset/4));
  527. w &= ~mask;
  528. w |= val;
  529. W_REG(sii->osh, map+(offset/4), w);
  530. }
  531. return (R_REG(sii->osh, map+(offset/4)));
  532. }
  533. uint
  534. ai_corevendor(si_t *sih)
  535. {
  536. si_info_t *sii = SI_INFO(sih);
  537. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  538. uint32 cia;
  539. cia = cores_info->cia[sii->curidx];
  540. return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
  541. }
  542. uint
  543. ai_corerev(si_t *sih)
  544. {
  545. si_info_t *sii = SI_INFO(sih);
  546. si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
  547. uint32 cib;
  548. cib = cores_info->cib[sii->curidx];
  549. return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
  550. }
  551. bool
  552. ai_iscoreup(si_t *sih)
  553. {
  554. si_info_t *sii = SI_INFO(sih);
  555. aidmp_t *ai;
  556. ai = sii->curwrap;
  557. return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
  558. ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
  559. }
/*
 * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 */
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	uint32 *r = NULL;
	uint w;
	uint intr_val = 0;
	bool fast = FALSE;	/* TRUE when reachable without a core switch */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);
	/* set bits must lie entirely within the clear mask */
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES)
		return 0;

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */
			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	if (!fast) {
		/* Slow path: block interrupts and temporarily refocus the core */
		INTR_OFF(sii, intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
	}
	ASSERT(r != NULL);

	/* mask and set */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx)
			ai_setcoreidx(&sii->pub, origidx);

		INTR_RESTORE(sii, intr_val);
	}

	return (w);
}
/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * back plane registers, pci registers and chipcommon registers), this function
 * returns the register offset on this core to a mapped address. This address can
 * be used for W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
uint32 *
ai_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	uint32 *r = NULL;
	bool fast = FALSE;	/* mirrors the fast-path logic of ai_corereg() */
	si_info_t *sii = SI_INFO(sih);
	si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

	ASSERT(GOODIDX(coreidx));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES)
		return 0;	/* 0 used as the null pointer here */

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs[coreidx]) {
			cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
				SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs[coreidx]));
		}
		r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
		if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */
			fast = TRUE;
			r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii))
				r = (uint32 *)((char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			else
				r = (uint32 *)((char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
					regoff);
		}
	}

	if (!fast)
		return 0;	/* would need a core switch: caller must use ai_corereg() */

	return (r);
}
/*
 * Put the current core into reset, then program its ioctrl with 'bits'.
 * Waits for pending backplane transactions (resetstatus) to drain before
 * asserting reset; if they never drain, the disable proceeds anyway.
 * No-op when the core is already in reset.
 */
void
ai_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii = SI_INFO(sih);
	volatile uint32 dummy;	/* readback to flush/post the write */
	uint32 status;
	aidmp_t *ai;

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
		return;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* if pending backplane ops still, try waiting longer */
	if (status != 0) {
		/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
		/* during driver load we may need more time */
		SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
		/* if still pending ops, continue on and try disable anyway */
		/* this is in big hammer path, so don't call wl_reinit in this case... */
	}

	/* assert reset, then read back to make sure the write has landed */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	dummy = R_REG(sii->osh, &ai->resetctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(10);
}
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii = SI_INFO(sih);
	aidmp_t *ai;
	volatile uint32 dummy;	/* readbacks to flush/post writes */
	uint loop_counter = 10;	/* bounded retries for de-asserting reset */

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* put core into reset state */
	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(10);

	/* ensure there are no pending backplane operations */
	SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);

	/* enable the clock (with force-gate and caller's reset-phase bits)
	 * while the core is still held in reset
	 */
	W_REG(sii->osh, &ai->ioctrl, (bits | resetbits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);

	/* ensure there are no pending backplane operations */
	SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

	/* repeatedly try to take the core out of reset until resetctrl
	 * reads back 0 or the retry budget is exhausted
	 */
	while (R_REG(sii->osh, &ai->resetctrl) != 0 && --loop_counter != 0) {
		/* ensure there are no pending backplane operations */
		SPINWAIT(((dummy = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);

		/* take core out of reset */
		W_REG(sii->osh, &ai->resetctrl, 0);

		/* ensure there are no pending backplane operations */
		SPINWAIT((R_REG(sii->osh, &ai->resetstatus) != 0), 300);
	}

	/* drop force-gate and the reset-only bits, leaving the clock enabled */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);
	BCM_REFERENCE(dummy);
	OSL_DELAY(1);
}
  763. void
  764. ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
  765. {
  766. si_info_t *sii = SI_INFO(sih);
  767. aidmp_t *ai;
  768. uint32 w;
  769. if (BCM47162_DMP()) {
  770. SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
  771. __FUNCTION__));
  772. return;
  773. }
  774. if (BCM5357_DMP()) {
  775. SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
  776. __FUNCTION__));
  777. return;
  778. }
  779. if (BCM4707_DMP()) {
  780. SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
  781. __FUNCTION__));
  782. return;
  783. }
  784. ASSERT(GOODREGS(sii->curwrap));
  785. ai = sii->curwrap;
  786. ASSERT((val & ~mask) == 0);
  787. if (mask || val) {
  788. w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
  789. W_REG(sii->osh, &ai->ioctrl, w);
  790. }
  791. }
  792. uint32
  793. ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
  794. {
  795. si_info_t *sii = SI_INFO(sih);
  796. aidmp_t *ai;
  797. uint32 w;
  798. if (BCM47162_DMP()) {
  799. SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
  800. __FUNCTION__));
  801. return 0;
  802. }
  803. if (BCM5357_DMP()) {
  804. SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
  805. __FUNCTION__));
  806. return 0;
  807. }
  808. if (BCM4707_DMP()) {
  809. SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
  810. __FUNCTION__));
  811. return 0;
  812. }
  813. ASSERT(GOODREGS(sii->curwrap));
  814. ai = sii->curwrap;
  815. ASSERT((val & ~mask) == 0);
  816. if (mask || val) {
  817. w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
  818. W_REG(sii->osh, &ai->ioctrl, w);
  819. }
  820. return R_REG(sii->osh, &ai->ioctrl);
  821. }
  822. uint32
  823. ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
  824. {
  825. si_info_t *sii = SI_INFO(sih);
  826. aidmp_t *ai;
  827. uint32 w;
  828. if (BCM47162_DMP()) {
  829. SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
  830. __FUNCTION__));
  831. return 0;
  832. }
  833. if (BCM5357_DMP()) {
  834. SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
  835. __FUNCTION__));
  836. return 0;
  837. }
  838. if (BCM4707_DMP()) {
  839. SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
  840. __FUNCTION__));
  841. return 0;
  842. }
  843. ASSERT(GOODREGS(sii->curwrap));
  844. ai = sii->curwrap;
  845. ASSERT((val & ~mask) == 0);
  846. ASSERT((mask & ~SISF_CORE_BITS) == 0);
  847. if (mask || val) {
  848. w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
  849. W_REG(sii->osh, &ai->iostatus, w);
  850. }
  851. return R_REG(sii->osh, &ai->iostatus);
  852. }