  1. /*
  2. * Misc utility routines for accessing chip-specific features
  3. * of the SiliconBackplane-based Broadcom chips.
  4. *
  5. * Copyright (C) 1999-2010, Broadcom Corporation
  6. *
  7. * Unless you and Broadcom execute a separate written software license
  8. * agreement governing use of this software, this software is licensed to you
  9. * under the terms of the GNU General Public License version 2 (the "GPL"),
  10. * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  11. * following added to such license:
  12. *
  13. * As a special exception, the copyright holders of this software give you
  14. * permission to link this software with independent modules, and to copy and
  15. * distribute the resulting executable under terms of your choice, provided that
  16. * you also meet, for each linked independent module, the terms and conditions of
  17. * the license of that module. An independent module is a module which is not
  18. * derived from this software. The special exception does not apply to any
  19. * modifications of the software.
  20. *
  21. * Notwithstanding the above, under no circumstances may you combine this
  22. * software in any way with any other Broadcom software provided under a license
  23. * other than the GPL, without Broadcom's express prior written consent.
  24. *
  25. * $Id: aiutils.c,v 1.6.4.7.4.6 2010/04/21 20:43:47 Exp $
  26. */
  27. #include <typedefs.h>
  28. #include <bcmdefs.h>
  29. #include <osl.h>
  30. #include <bcmutils.h>
  31. #include <siutils.h>
  32. #include <hndsoc.h>
  33. #include <sbchipc.h>
  34. #include <pcicfg.h>
  35. #include "siutils_priv.h"
/* Forward declaration: read one Address Space Descriptor from the EROM for
 * slave port 'sp', descriptor number 'ad', of type 'st' (AD_ST_SLAVE,
 * AD_ST_BRIDGE, AD_ST_MWRAP or AD_ST_SWRAP).  On a match, fills in the
 * 64-bit address (*addrl/*addrh) and size (*sizel/*sizeh) and returns the
 * raw descriptor word; returns 0 (and rewinds *eromptr) if the next EROM
 * entry is not the requested descriptor.
 */
STATIC uint32
get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st,
	uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh);
  39. /* EROM parsing */
  40. static uint32
  41. get_erom_ent(si_t *sih, uint32 *eromptr, uint32 mask, uint32 match)
  42. {
  43. uint32 ent;
  44. uint inv = 0, nom = 0;
  45. while (TRUE) {
  46. ent = R_REG(si_osh(sih), (uint32 *)(uintptr)(*eromptr));
  47. *eromptr += sizeof(uint32);
  48. if (mask == 0)
  49. break;
  50. if ((ent & ER_VALID) == 0) {
  51. inv++;
  52. continue;
  53. }
  54. if (ent == (ER_END | ER_VALID))
  55. break;
  56. if ((ent & mask) == match)
  57. break;
  58. nom++;
  59. }
  60. SI_MSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
  61. if (inv + nom)
  62. SI_MSG((" after %d invalid and %d non-matching entries\n", inv, nom));
  63. return ent;
  64. }
  65. STATIC uint32
  66. get_asd(si_t *sih, uint32 *eromptr, uint sp, uint ad, uint st,
  67. uint32 *addrl, uint32 *addrh, uint32 *sizel, uint32 *sizeh)
  68. {
  69. uint32 asd, sz, szd;
  70. asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
  71. if (((asd & ER_TAG1) != ER_ADD) ||
  72. (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
  73. ((asd & AD_ST_MASK) != st)) {
  74. /* This is not what we want, "push" it back */
  75. *eromptr -= sizeof(uint32);
  76. return 0;
  77. }
  78. *addrl = asd & AD_ADDR_MASK;
  79. if (asd & AD_AG32)
  80. *addrh = get_erom_ent(sih, eromptr, 0, 0);
  81. else
  82. *addrh = 0;
  83. *sizeh = 0;
  84. sz = asd & AD_SZ_MASK;
  85. if (sz == AD_SZ_SZD) {
  86. szd = get_erom_ent(sih, eromptr, 0, 0);
  87. *sizel = szd & SD_SZ_MASK;
  88. if (szd & SD_SG32)
  89. *sizeh = get_erom_ent(sih, eromptr, 0, 0);
  90. } else
  91. *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
  92. SI_MSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
  93. sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
  94. return asd;
  95. }
  96. /* parse the enumeration rom to identify all cores */
  97. void
  98. ai_scan(si_t *sih, void *regs, uint devid)
  99. {
  100. si_info_t *sii = SI_INFO(sih);
  101. chipcregs_t *cc = (chipcregs_t *)regs;
  102. uint32 erombase, eromptr, eromlim;
  103. erombase = R_REG(sii->osh, &cc->eromptr);
  104. switch (BUSTYPE(sih->bustype)) {
  105. case SI_BUS:
  106. eromptr = (uintptr)REG_MAP(erombase, SI_CORE_SIZE);
  107. break;
  108. case PCI_BUS:
  109. /* Set wrappers address */
  110. sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
  111. /* Now point the window at the erom */
  112. OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
  113. eromptr = (uint32)(uintptr)regs;
  114. break;
  115. case SPI_BUS:
  116. case SDIO_BUS:
  117. eromptr = erombase;
  118. break;
  119. case PCMCIA_BUS:
  120. default:
  121. SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
  122. ASSERT(0);
  123. return;
  124. }
  125. eromlim = eromptr + ER_REMAPCONTROL;
  126. SI_MSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%08x, eromlim = 0x%08x\n",
  127. regs, erombase, eromptr, eromlim));
  128. while (eromptr < eromlim) {
  129. uint32 cia, cib, base, cid, mfg, crev, nmw, nsw, nmp, nsp;
  130. uint32 mpd, asd, addrl, addrh, sizel, sizeh;
  131. uint i, j, idx;
  132. bool br;
  133. br = FALSE;
  134. /* Grok a component */
  135. cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
  136. if (cia == (ER_END | ER_VALID)) {
  137. SI_MSG(("Found END of erom after %d cores\n", sii->numcores));
  138. return;
  139. }
  140. base = eromptr - sizeof(uint32);
  141. cib = get_erom_ent(sih, &eromptr, 0, 0);
  142. if ((cib & ER_TAG) != ER_CI) {
  143. SI_ERROR(("CIA not followed by CIB\n"));
  144. goto error;
  145. }
  146. cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
  147. mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
  148. crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
  149. nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
  150. nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
  151. nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
  152. nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
  153. SI_MSG(("Found component 0x%04x/0x%4x rev %d at erom addr 0x%08x, with nmw = %d, "
  154. "nsw = %d, nmp = %d & nsp = %d\n",
  155. mfg, cid, crev, base, nmw, nsw, nmp, nsp));
  156. if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
  157. continue;
  158. if ((nmw + nsw == 0)) {
  159. /* A component which is not a core */
  160. if (cid == OOB_ROUTER_CORE_ID) {
  161. asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
  162. &addrl, &addrh, &sizel, &sizeh);
  163. if (asd != 0) {
  164. sii->common_info->oob_router = addrl;
  165. }
  166. }
  167. continue;
  168. }
  169. idx = sii->numcores;
  170. /* sii->eromptr[idx] = base; */
  171. sii->common_info->cia[idx] = cia;
  172. sii->common_info->cib[idx] = cib;
  173. sii->common_info->coreid[idx] = cid;
  174. for (i = 0; i < nmp; i++) {
  175. mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
  176. if ((mpd & ER_TAG) != ER_MP) {
  177. SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
  178. goto error;
  179. }
  180. SI_MSG((" Master port %d, mp: %d id: %d\n", i,
  181. (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
  182. (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
  183. }
  184. /* First Slave Address Descriptor should be port 0:
  185. * the main register space for the core
  186. */
  187. asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
  188. if (asd == 0) {
  189. /* Try again to see if it is a bridge */
  190. asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
  191. &sizel, &sizeh);
  192. if (asd != 0)
  193. br = TRUE;
  194. else
  195. if ((addrh != 0) || (sizeh != 0) || (sizel != SI_CORE_SIZE)) {
  196. SI_ERROR(("First Slave ASD for core 0x%04x malformed "
  197. "(0x%08x)\n", cid, asd));
  198. goto error;
  199. }
  200. }
  201. sii->common_info->coresba[idx] = addrl;
  202. sii->common_info->coresba_size[idx] = sizel;
  203. /* Get any more ASDs in port 0 */
  204. j = 1;
  205. do {
  206. asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
  207. &sizel, &sizeh);
  208. if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE))
  209. sii->common_info->coresba2[idx] = addrl;
  210. sii->common_info->coresba2_size[idx] = sizel;
  211. j++;
  212. } while (asd != 0);
  213. /* Go through the ASDs for other slave ports */
  214. for (i = 1; i < nsp; i++) {
  215. j = 0;
  216. do {
  217. asd = get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE, &addrl, &addrh,
  218. &sizel, &sizeh);
  219. } while (asd != 0);
  220. if (j == 0) {
  221. SI_ERROR((" SP %d has no address descriptors\n", i));
  222. goto error;
  223. }
  224. }
  225. /* Now get master wrappers */
  226. for (i = 0; i < nmw; i++) {
  227. asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
  228. &sizel, &sizeh);
  229. if (asd == 0) {
  230. SI_ERROR(("Missing descriptor for MW %d\n", i));
  231. goto error;
  232. }
  233. if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
  234. SI_ERROR(("Master wrapper %d is not 4KB\n", i));
  235. goto error;
  236. }
  237. if (i == 0)
  238. sii->common_info->wrapba[idx] = addrl;
  239. }
  240. /* And finally slave wrappers */
  241. for (i = 0; i < nsw; i++) {
  242. uint fwp = (nsp == 1) ? 0 : 1;
  243. asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
  244. &sizel, &sizeh);
  245. if (asd == 0) {
  246. SI_ERROR(("Missing descriptor for SW %d\n", i));
  247. goto error;
  248. }
  249. if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
  250. SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
  251. goto error;
  252. }
  253. if ((nmw == 0) && (i == 0))
  254. sii->common_info->wrapba[idx] = addrl;
  255. }
  256. /* Don't record bridges */
  257. if (br)
  258. continue;
  259. /* Done with core */
  260. sii->numcores++;
  261. }
  262. SI_ERROR(("Reached end of erom without finding END"));
  263. error:
  264. sii->numcores = 0;
  265. return;
  266. }
  267. /* This function changes the logical "focus" to the indicated core.
  268. * Return the current core's virtual address.
  269. */
  270. void *
  271. ai_setcoreidx(si_t *sih, uint coreidx)
  272. {
  273. si_info_t *sii = SI_INFO(sih);
  274. uint32 addr = sii->common_info->coresba[coreidx];
  275. uint32 wrap = sii->common_info->wrapba[coreidx];
  276. void *regs;
  277. if (coreidx >= sii->numcores)
  278. return (NULL);
  279. /*
  280. * If the user has provided an interrupt mask enabled function,
  281. * then assert interrupts are disabled before switching the core.
  282. */
  283. ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
  284. switch (BUSTYPE(sih->bustype)) {
  285. case SI_BUS:
  286. /* map new one */
  287. if (!sii->common_info->regs[coreidx]) {
  288. sii->common_info->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
  289. ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
  290. }
  291. sii->curmap = regs = sii->common_info->regs[coreidx];
  292. if (!sii->common_info->wrappers[coreidx]) {
  293. sii->common_info->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
  294. ASSERT(GOODREGS(sii->common_info->wrappers[coreidx]));
  295. }
  296. sii->curwrap = sii->common_info->wrappers[coreidx];
  297. break;
  298. case SPI_BUS:
  299. case SDIO_BUS:
  300. sii->curmap = regs = (void *)((uintptr)addr);
  301. sii->curwrap = (void *)((uintptr)wrap);
  302. break;
  303. case PCMCIA_BUS:
  304. default:
  305. ASSERT(0);
  306. regs = NULL;
  307. break;
  308. }
  309. sii->curmap = regs;
  310. sii->curidx = coreidx;
  311. return regs;
  312. }
  313. /* Return the number of address spaces in current core */
  314. int
  315. ai_numaddrspaces(si_t *sih)
  316. {
  317. return 2;
  318. }
  319. /* Return the address of the nth address space in the current core */
  320. uint32
  321. ai_addrspace(si_t *sih, uint asidx)
  322. {
  323. si_info_t *sii;
  324. uint cidx;
  325. sii = SI_INFO(sih);
  326. cidx = sii->curidx;
  327. if (asidx == 0)
  328. return sii->common_info->coresba[cidx];
  329. else if (asidx == 1)
  330. return sii->common_info->coresba2[cidx];
  331. else {
  332. SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
  333. __FUNCTION__, asidx));
  334. return 0;
  335. }
  336. }
  337. /* Return the size of the nth address space in the current core */
  338. uint32
  339. ai_addrspacesize(si_t *sih, uint asidx)
  340. {
  341. si_info_t *sii;
  342. uint cidx;
  343. sii = SI_INFO(sih);
  344. cidx = sii->curidx;
  345. if (asidx == 0)
  346. return sii->common_info->coresba_size[cidx];
  347. else if (asidx == 1)
  348. return sii->common_info->coresba2_size[cidx];
  349. else {
  350. SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
  351. __FUNCTION__, asidx));
  352. return 0;
  353. }
  354. }
  355. uint
  356. ai_flag(si_t *sih)
  357. {
  358. si_info_t *sii;
  359. aidmp_t *ai;
  360. sii = SI_INFO(sih);
  361. ai = sii->curwrap;
  362. return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
  363. }
/* Set the core interrupt flag; intentionally a no-op on AI chips
 * (kept for interface parity with the SB backplane implementation).
 */
void
ai_setint(si_t *sih, int siflag)
{
}
  368. void
  369. ai_write_wrap_reg(si_t *sih, uint32 offset, uint32 val)
  370. {
  371. si_info_t *sii = SI_INFO(sih);
  372. aidmp_t *ai = sii->curwrap;
  373. W_REG(sii->osh, (uint32 *)((uint8 *)ai+offset), val);
  374. return;
  375. }
  376. uint
  377. ai_corevendor(si_t *sih)
  378. {
  379. si_info_t *sii;
  380. uint32 cia;
  381. sii = SI_INFO(sih);
  382. cia = sii->common_info->cia[sii->curidx];
  383. return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
  384. }
  385. uint
  386. ai_corerev(si_t *sih)
  387. {
  388. si_info_t *sii;
  389. uint32 cib;
  390. sii = SI_INFO(sih);
  391. cib = sii->common_info->cib[sii->curidx];
  392. return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
  393. }
  394. bool
  395. ai_iscoreup(si_t *sih)
  396. {
  397. si_info_t *sii;
  398. aidmp_t *ai;
  399. sii = SI_INFO(sih);
  400. ai = sii->curwrap;
  401. return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
  402. ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
  403. }
  404. /*
  405. * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
  406. * switch back to the original core, and return the new value.
  407. *
  408. * When using the silicon backplane, no fidleing with interrupts or core switches are needed.
  409. *
  410. * Also, when using pci/pcie, we can optimize away the core switching for pci registers
  411. * and (on newer pci cores) chipcommon registers.
  412. */
  413. uint
  414. ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
  415. {
  416. uint origidx = 0;
  417. uint32 *r = NULL;
  418. uint w;
  419. uint intr_val = 0;
  420. bool fast = FALSE;
  421. si_info_t *sii;
  422. sii = SI_INFO(sih);
  423. ASSERT(GOODIDX(coreidx));
  424. ASSERT(regoff < SI_CORE_SIZE);
  425. ASSERT((val & ~mask) == 0);
  426. if (coreidx >= SI_MAXCORES)
  427. return 0;
  428. if (BUSTYPE(sih->bustype) == SI_BUS) {
  429. /* If internal bus, we can always get at everything */
  430. fast = TRUE;
  431. /* map if does not exist */
  432. if (!sii->common_info->wrappers[coreidx]) {
  433. sii->common_info->regs[coreidx] =
  434. REG_MAP(sii->common_info->coresba[coreidx], SI_CORE_SIZE);
  435. ASSERT(GOODREGS(sii->common_info->regs[coreidx]));
  436. }
  437. r = (uint32 *)((uchar *)sii->common_info->regs[coreidx] + regoff);
  438. } else if (BUSTYPE(sih->bustype) == PCI_BUS) {
  439. /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
  440. if ((sii->common_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
  441. /* Chipc registers are mapped at 12KB */
  442. fast = TRUE;
  443. r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
  444. } else if (sii->pub.buscoreidx == coreidx) {
  445. /* pci registers are at either in the last 2KB of an 8KB window
  446. * or, in pcie and pci rev 13 at 8KB
  447. */
  448. fast = TRUE;
  449. if (SI_FAST(sii))
  450. r = (uint32 *)((char *)sii->curmap +
  451. PCI_16KB0_PCIREGS_OFFSET + regoff);
  452. else
  453. r = (uint32 *)((char *)sii->curmap +
  454. ((regoff >= SBCONFIGOFF) ?
  455. PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
  456. regoff);
  457. }
  458. }
  459. if (!fast) {
  460. INTR_OFF(sii, intr_val);
  461. /* save current core index */
  462. origidx = si_coreidx(&sii->pub);
  463. /* switch core */
  464. r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
  465. }
  466. ASSERT(r != NULL);
  467. /* mask and set */
  468. if (mask || val) {
  469. w = (R_REG(sii->osh, r) & ~mask) | val;
  470. W_REG(sii->osh, r, w);
  471. }
  472. /* readback */
  473. w = R_REG(sii->osh, r);
  474. if (!fast) {
  475. /* restore core index */
  476. if (origidx != coreidx)
  477. ai_setcoreidx(&sii->pub, origidx);
  478. INTR_RESTORE(sii, intr_val);
  479. }
  480. return (w);
  481. }
/* Put the current core into reset through its AI wrapper.
 * 'bits' are core-specific ioctrl flags to leave set while the core is
 * held disabled.  The write / flushing-read / delay ordering follows
 * the DMP reset sequence -- do not reorder these statements.
 */
void
ai_core_disable(si_t *sih, uint32 bits)
{
	si_info_t *sii;
	volatile uint32 dummy;	/* readback target; forces the write to post */
	aidmp_t *ai;

	sii = SI_INFO(sih);

	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/* if core is already in reset, just return */
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
		return;

	W_REG(sii->osh, &ai->ioctrl, bits);
	dummy = R_REG(sii->osh, &ai->ioctrl);	/* flush the ioctrl write */
	OSL_DELAY(10);

	W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	OSL_DELAY(1);
}
/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 *
 * Sequence: disable the core (with the reset-time bits asserted), then
 * enable the clock under forced gating (SICF_FGC), release reset, and
 * finally drop FGC.  The readbacks flush each write; statement order
 * and the delays are part of the hardware protocol -- do not reorder.
 */
void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
	si_info_t *sii;
	aidmp_t *ai;
	volatile uint32 dummy;	/* readback target; forces writes to post */

	sii = SI_INFO(sih);
	ASSERT(GOODREGS(sii->curwrap));
	ai = sii->curwrap;

	/*
	 * Must do the disable sequence first to work for arbitrary current core state.
	 */
	ai_core_disable(sih, (bits | resetbits));

	/*
	 * Now do the initialization sequence.
	 */
	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);	/* flush before releasing reset */
	W_REG(sii->osh, &ai->resetctrl, 0);
	OSL_DELAY(1);

	W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
	dummy = R_REG(sii->osh, &ai->ioctrl);	/* flush the final ioctrl write */
	OSL_DELAY(1);
}
  529. void
  530. ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
  531. {
  532. si_info_t *sii;
  533. aidmp_t *ai;
  534. uint32 w;
  535. sii = SI_INFO(sih);
  536. ASSERT(GOODREGS(sii->curwrap));
  537. ai = sii->curwrap;
  538. ASSERT((val & ~mask) == 0);
  539. if (mask || val) {
  540. w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
  541. W_REG(sii->osh, &ai->ioctrl, w);
  542. }
  543. }
  544. uint32
  545. ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
  546. {
  547. si_info_t *sii;
  548. aidmp_t *ai;
  549. uint32 w;
  550. sii = SI_INFO(sih);
  551. ASSERT(GOODREGS(sii->curwrap));
  552. ai = sii->curwrap;
  553. ASSERT((val & ~mask) == 0);
  554. if (mask || val) {
  555. w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
  556. W_REG(sii->osh, &ai->ioctrl, w);
  557. }
  558. return R_REG(sii->osh, &ai->ioctrl);
  559. }
  560. uint32
  561. ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
  562. {
  563. si_info_t *sii;
  564. aidmp_t *ai;
  565. uint32 w;
  566. sii = SI_INFO(sih);
  567. ASSERT(GOODREGS(sii->curwrap));
  568. ai = sii->curwrap;
  569. ASSERT((val & ~mask) == 0);
  570. ASSERT((mask & ~SISF_CORE_BITS) == 0);
  571. if (mask || val) {
  572. w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
  573. W_REG(sii->osh, &ai->iostatus, w);
  574. }
  575. return R_REG(sii->osh, &ai->iostatus);
  576. }