sbutils.c

/*
 * Misc utility routines for accessing chip-specific features
 * of the SiliconBackplane-based Broadcom chips.
 *
 * Copyright (C) 1999-2015, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 * $Id: sbutils.c 431423 2013-10-23 16:07:35Z $
 */

#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <bcmdevs.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include <sbpcmcia.h>

#include "siutils_priv.h"

/* local prototypes */
static uint _sb_coreidx(si_info_t *sii, uint32 sba);
static uint _sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba,
                     uint ncores);
static uint32 _sb_coresba(si_info_t *sii);
static void *_sb_setcoreidx(si_info_t *sii, uint coreidx);

#define SET_SBREG(sii, r, mask, val) \
        W_SBREG((sii), (r), ((R_SBREG((sii), (r)) & ~(mask)) | (val)))
#define REGS2SB(va)     (sbconfig_t*) ((int8*)(va) + SBCONFIGOFF)

/* sonicsrev */
#define SONICS_2_2      (SBIDL_RV_2_2 >> SBIDL_RV_SHIFT)
#define SONICS_2_3      (SBIDL_RV_2_3 >> SBIDL_RV_SHIFT)

#define R_SBREG(sii, sbr)       sb_read_sbreg((sii), (sbr))
#define W_SBREG(sii, sbr, v)    sb_write_sbreg((sii), (sbr), (v))
#define AND_SBREG(sii, sbr, v)  W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) & (v)))
#define OR_SBREG(sii, sbr, v)   W_SBREG((sii), (sbr), (R_SBREG((sii), (sbr)) | (v)))
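
/*
 * Usage sketch (illustrative only, not part of the driver): the helper macros
 * above all expand into read-modify-write sequences on a core's sbconfig
 * registers. The guard macro SBUTILS_USAGE_SKETCHES is hypothetical and exists
 * solely to keep these examples out of real builds.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static void
example_sbreg_rmw(si_info_t *sii, sbconfig_t *sb)
{
    /* clear the target-reject bits while preserving every other bit */
    SET_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ_MASK, 0);

    /* equivalent effect using the AND helper directly */
    AND_SBREG(sii, &sb->sbtmstatelow, ~SBTML_REJ_MASK);
}
#endif /* SBUTILS_USAGE_SKETCHES */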

static uint32
sb_read_sbreg(si_info_t *sii, volatile uint32 *sbr)
{
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint8 tmp;
    uint32 val, intr_val = 0;

    /*
     * Compact flash only has an 11-bit address, while we need a 12-bit address.
     * MEM_SEG will be OR'd with the other 11 address bits in hardware,
     * so we program MEM_SEG with the 12th bit when necessary (i.e. when accessing
     * sb registers). For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
     */
    if (PCMCIA(sii)) {
        INTR_OFF(sii, intr_val);
        tmp = 1;
        OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
        sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
    }

    val = R_REG(sii->osh, sbr);

    if (PCMCIA(sii)) {
        tmp = 0;
        OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
        INTR_RESTORE(sii, intr_val);
    }

    return (val);
}

static void
sb_write_sbreg(si_info_t *sii, volatile uint32 *sbr, uint32 v)
{
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint8 tmp;
    volatile uint32 dummy;
    uint32 intr_val = 0;

    /*
     * Compact flash only has an 11-bit address, while we need a 12-bit address.
     * MEM_SEG will be OR'd with the other 11 address bits in hardware,
     * so we program MEM_SEG with the 12th bit when necessary (i.e. when accessing
     * sb registers). For a normal PCMCIA bus (CFTable_regwinsz > 2k), do nothing special.
     */
    if (PCMCIA(sii)) {
        INTR_OFF(sii, intr_val);
        tmp = 1;
        OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
        sbr = (volatile uint32 *)((uintptr)sbr & ~(1 << 11)); /* mask out bit 11 */
    }

    if (BUSTYPE(sii->pub.bustype) == PCMCIA_BUS) {
        dummy = R_REG(sii->osh, sbr);
        BCM_REFERENCE(dummy);
        W_REG(sii->osh, (volatile uint16 *)sbr, (uint16)(v & 0xffff));
        dummy = R_REG(sii->osh, sbr);
        BCM_REFERENCE(dummy);
        W_REG(sii->osh, ((volatile uint16 *)sbr + 1), (uint16)((v >> 16) & 0xffff));
    } else
        W_REG(sii->osh, sbr, v);

    if (PCMCIA(sii)) {
        tmp = 0;
        OSL_PCMCIA_WRITE_ATTR(sii->osh, MEM_SEG, &tmp, 1);
        INTR_RESTORE(sii, intr_val);
    }
}

uint
sb_coreid(si_t *sih)
{
    si_info_t *sii;
    sbconfig_t *sb;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT);
}

uint
sb_intflag(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    void *corereg;
    sbconfig_t *sb;
    uint origidx, intflag, intr_val = 0;

    INTR_OFF(sii, intr_val);
    origidx = si_coreidx(sih);
    corereg = si_setcore(sih, CC_CORE_ID, 0);
    ASSERT(corereg != NULL);
    sb = REGS2SB(corereg);
    intflag = R_SBREG(sii, &sb->sbflagst);
    sb_setcoreidx(sih, origidx);
    INTR_RESTORE(sii, intr_val);

    return intflag;
}

uint
sb_flag(si_t *sih)
{
    si_info_t *sii;
    sbconfig_t *sb;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    return R_SBREG(sii, &sb->sbtpsflag) & SBTPS_NUM0_MASK;
}

void
sb_setint(si_t *sih, int siflag)
{
    si_info_t *sii;
    sbconfig_t *sb;
    uint32 vec;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);
    if (siflag == -1)
        vec = 0;
    else
        vec = 1 << siflag;
    W_SBREG(sii, &sb->sbintvec, vec);
}

/* return core index of the core with address 'sba' */
static uint
_sb_coreidx(si_info_t *sii, uint32 sba)
{
    uint i;
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    for (i = 0; i < sii->numcores; i ++)
        if (sba == cores_info->coresba[i])
            return i;
    return BADIDX;
}

/* return core address of the current core */
static uint32
_sb_coresba(si_info_t *sii)
{
    uint32 sbaddr;

    switch (BUSTYPE(sii->pub.bustype)) {
    case SI_BUS: {
        sbconfig_t *sb = REGS2SB(sii->curmap);
        sbaddr = sb_base(R_SBREG(sii, &sb->sbadmatch0));
        break;
    }

    case PCI_BUS:
        sbaddr = OSL_PCI_READ_CONFIG(sii->osh, PCI_BAR0_WIN, sizeof(uint32));
        break;

    case PCMCIA_BUS: {
        uint8 tmp = 0;
        OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
        sbaddr = (uint32)tmp << 12;
        OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
        sbaddr |= (uint32)tmp << 16;
        OSL_PCMCIA_READ_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
        sbaddr |= (uint32)tmp << 24;
        break;
    }

    case SPI_BUS:
    case SDIO_BUS:
        sbaddr = (uint32)(uintptr)sii->curmap;
        break;

    default:
        sbaddr = BADCOREADDR;
        break;
    }

    return sbaddr;
}

uint
sb_corevendor(si_t *sih)
{
    si_info_t *sii;
    sbconfig_t *sb;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    return ((R_SBREG(sii, &sb->sbidhigh) & SBIDH_VC_MASK) >> SBIDH_VC_SHIFT);
}

uint
sb_corerev(si_t *sih)
{
    si_info_t *sii;
    sbconfig_t *sb;
    uint sbidh;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);
    sbidh = R_SBREG(sii, &sb->sbidhigh);

    return (SBCOREREV(sbidh));
}

/* set core-specific control flags */
void
sb_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii;
    sbconfig_t *sb;
    uint32 w;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    ASSERT((val & ~mask) == 0);

    /* mask and set */
    w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
        (val << SBTML_SICF_SHIFT);
    W_SBREG(sii, &sb->sbtmstatelow, w);
}

/* set/clear core-specific control flags */
uint32
sb_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii;
    sbconfig_t *sb;
    uint32 w;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    ASSERT((val & ~mask) == 0);

    /* mask and set */
    if (mask || val) {
        w = (R_SBREG(sii, &sb->sbtmstatelow) & ~(mask << SBTML_SICF_SHIFT)) |
            (val << SBTML_SICF_SHIFT);
        W_SBREG(sii, &sb->sbtmstatelow, w);
    }

    /* return the new value
     * for a write operation, the following readback ensures completion of the write operation.
     */
    return (R_SBREG(sii, &sb->sbtmstatelow) >> SBTML_SICF_SHIFT);
}
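
/*
 * Usage sketch (illustrative only): a caller toggling a core-specific control
 * flag through sb_core_cflags(). SICF_FGC ("force gated clocks") is one of the
 * SICF_* flag bits already used elsewhere in this file; the guard macro is the
 * same hypothetical SBUTILS_USAGE_SKETCHES used for the other sketches.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static void
example_force_gated_clocks(si_t *sih, bool on)
{
    /* read-modify-write of the SICF_FGC bit; the readback inside
     * sb_core_cflags() ensures the write has completed before returning.
     */
    (void)sb_core_cflags(sih, SICF_FGC, on ? SICF_FGC : 0);
}
#endif /* SBUTILS_USAGE_SKETCHES */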

/* set/clear core-specific status flags */
uint32
sb_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
    si_info_t *sii;
    sbconfig_t *sb;
    uint32 w;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    ASSERT((val & ~mask) == 0);
    ASSERT((mask & ~SISF_CORE_BITS) == 0);

    /* mask and set */
    if (mask || val) {
        w = (R_SBREG(sii, &sb->sbtmstatehigh) & ~(mask << SBTMH_SISF_SHIFT)) |
            (val << SBTMH_SISF_SHIFT);
        W_SBREG(sii, &sb->sbtmstatehigh, w);
    }

    /* return the new value */
    return (R_SBREG(sii, &sb->sbtmstatehigh) >> SBTMH_SISF_SHIFT);
}

bool
sb_iscoreup(si_t *sih)
{
    si_info_t *sii;
    sbconfig_t *sb;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    return ((R_SBREG(sii, &sb->sbtmstatelow) &
             (SBTML_RESET | SBTML_REJ_MASK | (SICF_CLOCK_EN << SBTML_SICF_SHIFT))) ==
            (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
}

/*
 * Switch to 'coreidx', issue a single arbitrary 32-bit register mask&set operation,
 * switch back to the original core, and return the new value.
 *
 * When using the silicon backplane, no fiddling with interrupts or core switches is needed.
 *
 * Also, when using pci/pcie, we can optimize away the core switching for pci registers
 * and (on newer pci cores) chipcommon registers.
 * (A usage sketch follows the function body below.)
 */
uint
sb_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
    uint origidx = 0;
    uint32 *r = NULL;
    uint w;
    uint intr_val = 0;
    bool fast = FALSE;
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODIDX(coreidx));
    ASSERT(regoff < SI_CORE_SIZE);
    ASSERT((val & ~mask) == 0);

    if (coreidx >= SI_MAXCORES)
        return 0;

    if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
        /* If internal bus, we can always get at everything */
        fast = TRUE;
        /* map if does not exist */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
                                                SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
    } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
        if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
            /* Chipc registers are mapped at 12KB */
            fast = TRUE;
            r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
        } else if (sii->pub.buscoreidx == coreidx) {
            /* pci registers are either in the last 2KB of an 8KB window
             * or, in pcie and pci rev 13, at 8KB
             */
            fast = TRUE;
            if (SI_FAST(sii))
                r = (uint32 *)((char *)sii->curmap +
                               PCI_16KB0_PCIREGS_OFFSET + regoff);
            else
                r = (uint32 *)((char *)sii->curmap +
                               ((regoff >= SBCONFIGOFF) ?
                                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
                               regoff);
        }
    }

    if (!fast) {
        INTR_OFF(sii, intr_val);

        /* save current core index */
        origidx = si_coreidx(&sii->pub);

        /* switch core */
        r = (uint32*) ((uchar*)sb_setcoreidx(&sii->pub, coreidx) + regoff);
    }
    ASSERT(r != NULL);

    /* mask and set */
    if (mask || val) {
        if (regoff >= SBCONFIGOFF) {
            w = (R_SBREG(sii, r) & ~mask) | val;
            W_SBREG(sii, r, w);
        } else {
            w = (R_REG(sii->osh, r) & ~mask) | val;
            W_REG(sii->osh, r, w);
        }
    }

    /* readback */
    if (regoff >= SBCONFIGOFF)
        w = R_SBREG(sii, r);
    else {
        if ((CHIPID(sii->pub.chip) == BCM5354_CHIP_ID) &&
            (coreidx == SI_CC_IDX) &&
            (regoff == OFFSETOF(chipcregs_t, watchdog))) {
            w = val;
        } else
            w = R_REG(sii->osh, r);
    }

    if (!fast) {
        /* restore core index */
        if (origidx != coreidx)
            sb_setcoreidx(&sii->pub, origidx);

        INTR_RESTORE(sii, intr_val);
    }

    return (w);
}
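
/*
 * Usage sketch (illustrative only): sb_corereg() is the usual way to do an
 * interrupt-safe read-modify-write on a register of another core without
 * leaving the caller's core selected. Writing the chipcommon watchdog is a
 * convenient example because the offsets involved already appear above.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static void
example_set_watchdog(si_t *sih, uint32 ticks)
{
    /* mask == ~0 and val == ticks means "write the whole register" */
    (void)sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, watchdog), ~0, ticks);
}
#endif /* SBUTILS_USAGE_SKETCHES */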

/*
 * If there is no need for fiddling with interrupts or core switches (typically silicon
 * backplane registers, pci registers and chipcommon registers), this function
 * translates the register offset in the given core into a mapped address. That address
 * can be used with W_REG/R_REG directly.
 *
 * For accessing registers that would need a core switch, this function will return
 * NULL.
 */
uint32 *
sb_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
    uint32 *r = NULL;
    bool fast = FALSE;
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    ASSERT(GOODIDX(coreidx));
    ASSERT(regoff < SI_CORE_SIZE);

    if (coreidx >= SI_MAXCORES)
        return 0;

    if (BUSTYPE(sii->pub.bustype) == SI_BUS) {
        /* If internal bus, we can always get at everything */
        fast = TRUE;
        /* map if does not exist */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(cores_info->coresba[coreidx],
                                                SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        r = (uint32 *)((uchar *)cores_info->regs[coreidx] + regoff);
    } else if (BUSTYPE(sii->pub.bustype) == PCI_BUS) {
        /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
        if ((cores_info->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
            /* Chipc registers are mapped at 12KB */
            fast = TRUE;
            r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
        } else if (sii->pub.buscoreidx == coreidx) {
            /* pci registers are either in the last 2KB of an 8KB window
             * or, in pcie and pci rev 13, at 8KB
             */
            fast = TRUE;
            if (SI_FAST(sii))
                r = (uint32 *)((char *)sii->curmap +
                               PCI_16KB0_PCIREGS_OFFSET + regoff);
            else
                r = (uint32 *)((char *)sii->curmap +
                               ((regoff >= SBCONFIGOFF) ?
                                PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
                               regoff);
        }
    }

    if (!fast)
        return 0;

    return (r);
}
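
/*
 * Usage sketch (illustrative only): when sb_corereg_addr() can resolve a
 * register without a core switch it returns a pointer usable with R_REG/W_REG;
 * otherwise the caller falls back to sb_corereg(). Both calls below use only
 * identifiers that already appear in this file.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static uint32
example_read_chipc_chipid(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    uint32 *addr = sb_corereg_addr(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid));

    if (addr != NULL)
        return R_REG(sii->osh, addr);   /* fast path, no core switch needed */

    /* slow path: sb_corereg() switches cores with interrupts off;
     * mask == 0 and val == 0 makes it a pure read.
     */
    return sb_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, chipid), 0, 0);
}
#endif /* SBUTILS_USAGE_SKETCHES */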

/* Scan the enumeration space to find all cores starting from the given
 * bus 'sbba'. Append coreid and other info to the lists in 'sii'. 'sba'
 * is the default core address at chip POR time and 'regs' is the virtual
 * address that the default core is mapped at. 'numcores' is the number of
 * cores expected on bus 'sbba'. It returns the total number of cores
 * starting from bus 'sbba', inclusive.
 */
#define SB_MAXBUSES 2
static uint
_sb_scan(si_info_t *sii, uint32 sba, void *regs, uint bus, uint32 sbba, uint numcores)
{
    uint next;
    uint ncc = 0;
    uint i;
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;

    if (bus >= SB_MAXBUSES) {
        SI_ERROR(("_sb_scan: bus 0x%08x at level %d is too deep to scan\n", sbba, bus));
        return 0;
    }
    SI_MSG(("_sb_scan: scan bus 0x%08x assume %u cores\n", sbba, numcores));

    /* Scan all cores on the bus starting from core 0.
     * Core addresses must be contiguous on each bus.
     */
    for (i = 0, next = sii->numcores; i < numcores && next < SB_BUS_MAXCORES; i++, next++) {
        cores_info->coresba[next] = sbba + (i * SI_CORE_SIZE);

        /* keep and reuse the initial register mapping */
        if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && (cores_info->coresba[next] == sba)) {
            SI_VMSG(("_sb_scan: reuse mapped regs %p for core %u\n", regs, next));
            cores_info->regs[next] = regs;
        }

        /* change core to 'next' and read its coreid */
        sii->curmap = _sb_setcoreidx(sii, next);
        sii->curidx = next;

        cores_info->coreid[next] = sb_coreid(&sii->pub);

        /* core specific processing... */
        /* chipc provides # cores */
        if (cores_info->coreid[next] == CC_CORE_ID) {
            chipcregs_t *cc = (chipcregs_t *)sii->curmap;
            uint32 ccrev = sb_corerev(&sii->pub);

            /* determine numcores - this is the total # cores in the chip */
            if (((ccrev == 4) || (ccrev >= 6))) {
                ASSERT(cc);
                numcores = (R_REG(sii->osh, &cc->chipid) & CID_CC_MASK) >>
                           CID_CC_SHIFT;
            } else {
                /* Older chips */
                uint chip = CHIPID(sii->pub.chip);

                if (chip == BCM4306_CHIP_ID)    /* < 4306c0 */
                    numcores = 6;
                else if (chip == BCM4704_CHIP_ID)
                    numcores = 9;
                else if (chip == BCM5365_CHIP_ID)
                    numcores = 7;
                else {
                    SI_ERROR(("sb_chip2numcores: unsupported chip 0x%x\n",
                              chip));
                    ASSERT(0);
                    numcores = 1;
                }
            }
            SI_VMSG(("_sb_scan: there are %u cores in the chip %s\n", numcores,
                     sii->pub.issim ? "QT" : ""));
        }
        /* scan bridged SB(s) and add results to the end of the list */
        else if (cores_info->coreid[next] == OCP_CORE_ID) {
            sbconfig_t *sb = REGS2SB(sii->curmap);
            uint32 nsbba = R_SBREG(sii, &sb->sbadmatch1);
            uint nsbcc;

            sii->numcores = next + 1;

            if ((nsbba & 0xfff00000) != SI_ENUM_BASE)
                continue;
            nsbba &= 0xfffff000;
            if (_sb_coreidx(sii, nsbba) != BADIDX)
                continue;

            nsbcc = (R_SBREG(sii, &sb->sbtmstatehigh) & 0x000f0000) >> 16;
            nsbcc = _sb_scan(sii, sba, regs, bus + 1, nsbba, nsbcc);
            if (sbba == SI_ENUM_BASE)
                numcores -= nsbcc;
            ncc += nsbcc;
        }
    }

    SI_MSG(("_sb_scan: found %u cores on bus 0x%08x\n", i, sbba));

    sii->numcores = i + ncc;
    return sii->numcores;
}

/* scan the sb enumerated space to identify all cores */
void
sb_scan(si_t *sih, void *regs, uint devid)
{
    uint32 origsba;
    sbconfig_t *sb;
    si_info_t *sii = SI_INFO(sih);

    sb = REGS2SB(sii->curmap);

    sii->pub.socirev = (R_SBREG(sii, &sb->sbidlow) & SBIDL_RV_MASK) >> SBIDL_RV_SHIFT;

    /* Save the current core info and validate it later till we know
     * for sure what is good and what is bad.
     */
    origsba = _sb_coresba(sii);

    /* scan all SB(s) starting from SI_ENUM_BASE */
    sii->numcores = _sb_scan(sii, origsba, regs, 0, SI_ENUM_BASE, 1);
}

/*
 * This function changes the logical "focus" to the indicated core;
 * it must be called with interrupts off.
 * Moreover, callers should keep interrupts off while switching out of and back to the d11 core.
 * (A usage sketch follows the function body below.)
 */
void *
sb_setcoreidx(si_t *sih, uint coreidx)
{
    si_info_t *sii = SI_INFO(sih);

    if (coreidx >= sii->numcores)
        return (NULL);

    /*
     * If the user has provided an interrupt mask enabled function,
     * then assert interrupts are disabled before switching the core.
     */
    ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));

    sii->curmap = _sb_setcoreidx(sii, coreidx);
    sii->curidx = coreidx;

    return (sii->curmap);
}
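
/*
 * Usage sketch (illustrative only): the save/switch/work/restore pattern that
 * sb_corereg() and sb_commit() use internally, spelled out for a caller. The
 * INTR_OFF/INTR_RESTORE and core-switch calls are the same ones used throughout
 * this file; 'coreidx' and 'regoff' are assumed to be valid.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static uint32
example_peek_other_core(si_t *sih, uint coreidx, uint regoff)
{
    si_info_t *sii = SI_INFO(sih);
    uint origidx;
    uint intr_val = 0;
    uint32 *r;
    uint32 w;

    INTR_OFF(sii, intr_val);
    origidx = si_coreidx(sih);          /* remember the currently selected core */

    r = (uint32 *)((uchar *)sb_setcoreidx(sih, coreidx) + regoff);
    w = R_REG(sii->osh, r);             /* do the work on the new core */

    sb_setcoreidx(sih, origidx);        /* switch back */
    INTR_RESTORE(sii, intr_val);

    return w;
}
#endif /* SBUTILS_USAGE_SKETCHES */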

/* This function changes the logical "focus" to the indicated core.
 * Return the current core's virtual address.
 */
static void *
_sb_setcoreidx(si_info_t *sii, uint coreidx)
{
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint32 sbaddr = cores_info->coresba[coreidx];
    void *regs;

    switch (BUSTYPE(sii->pub.bustype)) {
    case SI_BUS:
        /* map new one */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = REG_MAP(sbaddr, SI_CORE_SIZE);
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        regs = cores_info->regs[coreidx];
        break;

    case PCI_BUS:
        /* point bar0 window */
        OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, sbaddr);
        regs = sii->curmap;
        break;

    case PCMCIA_BUS: {
        uint8 tmp = (sbaddr >> 12) & 0x0f;
        OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR0, &tmp, 1);
        tmp = (sbaddr >> 16) & 0xff;
        OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR1, &tmp, 1);
        tmp = (sbaddr >> 24) & 0xff;
        OSL_PCMCIA_WRITE_ATTR(sii->osh, PCMCIA_ADDR2, &tmp, 1);
        regs = sii->curmap;
        break;
    }

    case SPI_BUS:
    case SDIO_BUS:
        /* map new one */
        if (!cores_info->regs[coreidx]) {
            cores_info->regs[coreidx] = (void *)(uintptr)sbaddr;
            ASSERT(GOODREGS(cores_info->regs[coreidx]));
        }
        regs = cores_info->regs[coreidx];
        break;

    default:
        ASSERT(0);
        regs = NULL;
        break;
    }

    return regs;
}

/* Return the address of sbadmatch0/1/2/3 register */
static volatile uint32 *
sb_admatch(si_info_t *sii, uint asidx)
{
    sbconfig_t *sb;
    volatile uint32 *addrm;

    sb = REGS2SB(sii->curmap);

    switch (asidx) {
    case 0:
        addrm = &sb->sbadmatch0;
        break;

    case 1:
        addrm = &sb->sbadmatch1;
        break;

    case 2:
        addrm = &sb->sbadmatch2;
        break;

    case 3:
        addrm = &sb->sbadmatch3;
        break;

    default:
        SI_ERROR(("%s: Address space index (%d) out of range\n", __FUNCTION__, asidx));
        return 0;
    }

    return (addrm);
}

/* Return the number of address spaces in current core */
int
sb_numaddrspaces(si_t *sih)
{
    si_info_t *sii;
    sbconfig_t *sb;

    sii = SI_INFO(sih);
    sb = REGS2SB(sii->curmap);

    /* + 1 because of enumeration space */
    return ((R_SBREG(sii, &sb->sbidlow) & SBIDL_AR_MASK) >> SBIDL_AR_SHIFT) + 1;
}

/* Return the address of the nth address space in the current core */
uint32
sb_addrspace(si_t *sih, uint asidx)
{
    si_info_t *sii;

    sii = SI_INFO(sih);

    return (sb_base(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* Return the size of the nth address space in the current core */
uint32
sb_addrspacesize(si_t *sih, uint asidx)
{
    si_info_t *sii;

    sii = SI_INFO(sih);

    return (sb_size(R_SBREG(sii, sb_admatch(sii, asidx))));
}

/* do buffered registers update */
void
sb_commit(si_t *sih)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint origidx;
    uint intr_val = 0;

    origidx = sii->curidx;
    ASSERT(GOODIDX(origidx));

    INTR_OFF(sii, intr_val);

    /* switch over to chipcommon core if there is one, else use pci */
    if (sii->pub.ccrev != NOREV) {
        chipcregs_t *ccregs = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0);
        ASSERT(ccregs != NULL);

        /* do the buffer registers update */
        W_REG(sii->osh, &ccregs->broadcastaddress, SB_COMMIT);
        W_REG(sii->osh, &ccregs->broadcastdata, 0x0);
    } else
        ASSERT(0);

    /* restore core index */
    sb_setcoreidx(sih, origidx);
    INTR_RESTORE(sii, intr_val);
}

void
sb_core_disable(si_t *sih, uint32 bits)
{
    si_info_t *sii;
    volatile uint32 dummy;
    sbconfig_t *sb;

    sii = SI_INFO(sih);

    ASSERT(GOODREGS(sii->curmap));
    sb = REGS2SB(sii->curmap);

    /* if core is already in reset, just return */
    if (R_SBREG(sii, &sb->sbtmstatelow) & SBTML_RESET)
        return;

    /* if clocks are not enabled, put into reset and return */
    if ((R_SBREG(sii, &sb->sbtmstatelow) & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) == 0)
        goto disable;

    /* set target reject and spin until busy is clear (preserve core-specific bits) */
    OR_SBREG(sii, &sb->sbtmstatelow, SBTML_REJ);
    dummy = R_SBREG(sii, &sb->sbtmstatelow);
    BCM_REFERENCE(dummy);
    OSL_DELAY(1);
    SPINWAIT((R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY), 100000);
    if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_BUSY)
        SI_ERROR(("%s: target state still busy\n", __FUNCTION__));

    if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT) {
        OR_SBREG(sii, &sb->sbimstate, SBIM_RJ);
        dummy = R_SBREG(sii, &sb->sbimstate);
        BCM_REFERENCE(dummy);
        OSL_DELAY(1);
        SPINWAIT((R_SBREG(sii, &sb->sbimstate) & SBIM_BY), 100000);
    }

    /* set reset and reject while enabling the clocks */
    W_SBREG(sii, &sb->sbtmstatelow,
            (((bits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
             SBTML_REJ | SBTML_RESET));
    dummy = R_SBREG(sii, &sb->sbtmstatelow);
    BCM_REFERENCE(dummy);
    OSL_DELAY(10);

    /* don't forget to clear the initiator reject bit */
    if (R_SBREG(sii, &sb->sbidlow) & SBIDL_INIT)
        AND_SBREG(sii, &sb->sbimstate, ~SBIM_RJ);

disable:
    /* leave reset and reject asserted */
    W_SBREG(sii, &sb->sbtmstatelow, ((bits << SBTML_SICF_SHIFT) | SBTML_REJ | SBTML_RESET));
    OSL_DELAY(1);
}

/* reset and re-enable a core
 * inputs:
 * bits - core specific bits that are set during and after reset sequence
 * resetbits - core specific bits that are set only during reset sequence
 */
void
sb_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
    si_info_t *sii;
    sbconfig_t *sb;
    volatile uint32 dummy;

    sii = SI_INFO(sih);
    ASSERT(GOODREGS(sii->curmap));
    sb = REGS2SB(sii->curmap);

    /*
     * Must do the disable sequence first to work for arbitrary current core state.
     */
    sb_core_disable(sih, (bits | resetbits));

    /*
     * Now do the initialization sequence.
     */

    /* set reset while enabling the clock and forcing them on throughout the core */
    W_SBREG(sii, &sb->sbtmstatelow,
            (((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
             SBTML_RESET));
    dummy = R_SBREG(sii, &sb->sbtmstatelow);
    BCM_REFERENCE(dummy);
    OSL_DELAY(1);

    if (R_SBREG(sii, &sb->sbtmstatehigh) & SBTMH_SERR) {
        W_SBREG(sii, &sb->sbtmstatehigh, 0);
    }
    if ((dummy = R_SBREG(sii, &sb->sbimstate)) & (SBIM_IBE | SBIM_TO)) {
        AND_SBREG(sii, &sb->sbimstate, ~(SBIM_IBE | SBIM_TO));
    }

    /* clear reset and allow it to propagate throughout the core */
    W_SBREG(sii, &sb->sbtmstatelow,
            ((bits | resetbits | SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
    dummy = R_SBREG(sii, &sb->sbtmstatelow);
    BCM_REFERENCE(dummy);
    OSL_DELAY(1);

    /* leave clock enabled */
    W_SBREG(sii, &sb->sbtmstatelow, ((bits | SICF_CLOCK_EN) << SBTML_SICF_SHIFT));
    dummy = R_SBREG(sii, &sb->sbtmstatelow);
    BCM_REFERENCE(dummy);
    OSL_DELAY(1);
}
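
/*
 * Usage sketch (illustrative only): the disable/reset cycle a caller would
 * typically run before reloading a core. No chip-specific SICF_* bits are
 * passed here; real callers supply their own 'bits'/'resetbits' values.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static void
example_bounce_current_core(si_t *sih)
{
    /* put the currently selected core through a full reset cycle */
    sb_core_reset(sih, 0, 0);

    /* sb_iscoreup() should now report the core out of reset with its clock on */
    ASSERT(sb_iscoreup(sih));
}
#endif /* SBUTILS_USAGE_SKETCHES */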

/*
 * Set the initiator timeout for the "master core".
 * The master core is defined to be the core in control
 * of the chip and so it issues accesses to non-memory
 * locations (because of DMA, *any* core can access memory).
 *
 * The routine uses the bus to decide who is the master:
 *     SI_BUS => mips
 *     JTAG_BUS => chipc
 *     PCI_BUS => pci or pcie
 *     PCMCIA_BUS => pcmcia
 *     SDIO_BUS => pcmcia
 *
 * This routine exists so callers can disable initiator
 * timeouts so accesses to very slow devices like otp
 * won't cause an abort. The routine allows arbitrary
 * settings of the service and request timeouts, though.
 *
 * Returns the timeout state before changing it or -1
 * on error.
 * (A usage sketch follows the function body below.)
 */
#define TO_MASK (SBIMCL_RTO_MASK | SBIMCL_STO_MASK)

uint32
sb_set_initiator_to(si_t *sih, uint32 to, uint idx)
{
    si_info_t *sii = SI_INFO(sih);
    si_cores_info_t *cores_info = (si_cores_info_t *)sii->cores_info;
    uint origidx;
    uint intr_val = 0;
    uint32 tmp, ret = 0xffffffff;
    sbconfig_t *sb;

    if ((to & ~TO_MASK) != 0)
        return ret;

    /* Figure out the master core */
    if (idx == BADIDX) {
        switch (BUSTYPE(sii->pub.bustype)) {
        case PCI_BUS:
            idx = sii->pub.buscoreidx;
            break;
        case JTAG_BUS:
            idx = SI_CC_IDX;
            break;
        case PCMCIA_BUS:
        case SDIO_BUS:
            idx = si_findcoreidx(sih, PCMCIA_CORE_ID, 0);
            break;
        case SI_BUS:
            idx = si_findcoreidx(sih, MIPS33_CORE_ID, 0);
            break;
        default:
            ASSERT(0);
        }
        if (idx == BADIDX)
            return ret;
    }

    INTR_OFF(sii, intr_val);
    origidx = si_coreidx(sih);

    sb = REGS2SB(sb_setcoreidx(sih, idx));

    tmp = R_SBREG(sii, &sb->sbimconfiglow);
    ret = tmp & TO_MASK;
    W_SBREG(sii, &sb->sbimconfiglow, (tmp & ~TO_MASK) | to);

    sb_commit(sih);
    sb_setcoreidx(sih, origidx);
    INTR_RESTORE(sii, intr_val);
    return ret;
}
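
/*
 * Usage sketch (illustrative only): temporarily disabling the initiator
 * timeouts around a slow access and then restoring the previous setting, using
 * only sb_set_initiator_to() as defined above. Passing BADIDX lets the routine
 * pick the master core for the current bus.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static void
example_slow_access(si_t *sih)
{
    uint32 prev_to;

    /* a 'to' value of 0 clears both the request and service timeouts */
    prev_to = sb_set_initiator_to(sih, 0, BADIDX);

    /* ... perform the slow access (e.g. otp reads) here ... */

    /* restore the previous timeout state unless the first call failed */
    if (prev_to != 0xffffffff)
        (void)sb_set_initiator_to(sih, prev_to, BADIDX);
}
#endif /* SBUTILS_USAGE_SKETCHES */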

uint32
sb_base(uint32 admatch)
{
    uint32 base;
    uint type;

    type = admatch & SBAM_TYPE_MASK;
    ASSERT(type < 3);

    base = 0;

    if (type == 0) {
        base = admatch & SBAM_BASE0_MASK;
    } else if (type == 1) {
        ASSERT(!(admatch & SBAM_ADNEG));        /* neg not supported */
        base = admatch & SBAM_BASE1_MASK;
    } else if (type == 2) {
        ASSERT(!(admatch & SBAM_ADNEG));        /* neg not supported */
        base = admatch & SBAM_BASE2_MASK;
    }

    return (base);
}

uint32
sb_size(uint32 admatch)
{
    uint32 size;
    uint type;

    type = admatch & SBAM_TYPE_MASK;
    ASSERT(type < 3);

    size = 0;

    if (type == 0) {
        size = 1 << (((admatch & SBAM_ADINT0_MASK) >> SBAM_ADINT0_SHIFT) + 1);
    } else if (type == 1) {
        ASSERT(!(admatch & SBAM_ADNEG));        /* neg not supported */
        size = 1 << (((admatch & SBAM_ADINT1_MASK) >> SBAM_ADINT1_SHIFT) + 1);
    } else if (type == 2) {
        ASSERT(!(admatch & SBAM_ADNEG));        /* neg not supported */
        size = 1 << (((admatch & SBAM_ADINT2_MASK) >> SBAM_ADINT2_SHIFT) + 1);
    }

    return (size);
}
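
/*
 * Usage sketch (illustrative only): decoding one of the current core's
 * address-space descriptors into a (base, size) pair with the two helpers
 * above, mirroring how sb_addrspace()/sb_addrspacesize() use them.
 */
#ifdef SBUTILS_USAGE_SKETCHES
static void
example_dump_addrspace(si_t *sih, uint asidx)
{
    uint32 base = sb_addrspace(sih, asidx);     /* sb_base() of the sbadmatch register */
    uint32 size = sb_addrspacesize(sih, asidx); /* sb_size() of the sbadmatch register */

    SI_MSG(("addrspace %u: base 0x%08x size 0x%08x\n", asidx, base, size));
}
#endif /* SBUTILS_USAGE_SKETCHES */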