/* cvmx-l2c.c */
  1. /***********************license start***************
  2. * Author: Cavium Networks
  3. *
  4. * Contact: support@caviumnetworks.com
  5. * This file is part of the OCTEON SDK
  6. *
  7. * Copyright (c) 2003-2010 Cavium Networks
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more
  17. * details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this file; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  22. * or visit http://www.gnu.org/licenses/.
  23. *
  24. * This file may also be available under a different license from Cavium.
  25. * Contact Cavium Networks for more information
  26. ***********************license end**************************************/
  27. /*
  28. * Implementation of the Level 2 Cache (L2C) control,
  29. * measurement, and debugging facilities.
  30. */
  31. #include <linux/compiler.h>
  32. #include <linux/irqflags.h>
  33. #include <asm/octeon/cvmx.h>
  34. #include <asm/octeon/cvmx-l2c.h>
  35. #include <asm/octeon/cvmx-spinlock.h>
/*
 * This spinlock is used internally to ensure that only one core is
 * performing certain L2 operations at a time.  Within this file it is
 * taken by cvmx_l2c_lock_line() and cvmx_l2c_flush_line() around the
 * L2C debug-mode register sequences.
 *
 * NOTE: This only protects calls from within a single application -
 * if multiple applications or operating systems are running, then it
 * is up to the user program to coordinate between them.
 */
cvmx_spinlock_t cvmx_l2c_spinlock;
  45. int cvmx_l2c_get_core_way_partition(uint32_t core)
  46. {
  47. uint32_t field;
  48. /* Validate the core number */
  49. if (core >= cvmx_octeon_num_cores())
  50. return -1;
  51. if (OCTEON_IS_MODEL(OCTEON_CN63XX))
  52. return cvmx_read_csr(CVMX_L2C_WPAR_PPX(core)) & 0xffff;
  53. /*
  54. * Use the lower two bits of the coreNumber to determine the
  55. * bit offset of the UMSK[] field in the L2C_SPAR register.
  56. */
  57. field = (core & 0x3) * 8;
  58. /*
  59. * Return the UMSK[] field from the appropriate L2C_SPAR
  60. * register based on the coreNumber.
  61. */
  62. switch (core & 0xC) {
  63. case 0x0:
  64. return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >> field;
  65. case 0x4:
  66. return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >> field;
  67. case 0x8:
  68. return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >> field;
  69. case 0xC:
  70. return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >> field;
  71. }
  72. return 0;
  73. }
  74. int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
  75. {
  76. uint32_t field;
  77. uint32_t valid_mask;
  78. valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
  79. mask &= valid_mask;
  80. /* A UMSK setting which blocks all L2C Ways is an error on some chips */
  81. if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
  82. return -1;
  83. /* Validate the core number */
  84. if (core >= cvmx_octeon_num_cores())
  85. return -1;
  86. if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
  87. cvmx_write_csr(CVMX_L2C_WPAR_PPX(core), mask);
  88. return 0;
  89. }
  90. /*
  91. * Use the lower two bits of core to determine the bit offset of the
  92. * UMSK[] field in the L2C_SPAR register.
  93. */
  94. field = (core & 0x3) * 8;
  95. /*
  96. * Assign the new mask setting to the UMSK[] field in the appropriate
  97. * L2C_SPAR register based on the core_num.
  98. *
  99. */
  100. switch (core & 0xC) {
  101. case 0x0:
  102. cvmx_write_csr(CVMX_L2C_SPAR0,
  103. (cvmx_read_csr(CVMX_L2C_SPAR0) & ~(0xFF << field)) |
  104. mask << field);
  105. break;
  106. case 0x4:
  107. cvmx_write_csr(CVMX_L2C_SPAR1,
  108. (cvmx_read_csr(CVMX_L2C_SPAR1) & ~(0xFF << field)) |
  109. mask << field);
  110. break;
  111. case 0x8:
  112. cvmx_write_csr(CVMX_L2C_SPAR2,
  113. (cvmx_read_csr(CVMX_L2C_SPAR2) & ~(0xFF << field)) |
  114. mask << field);
  115. break;
  116. case 0xC:
  117. cvmx_write_csr(CVMX_L2C_SPAR3,
  118. (cvmx_read_csr(CVMX_L2C_SPAR3) & ~(0xFF << field)) |
  119. mask << field);
  120. break;
  121. }
  122. return 0;
  123. }
  124. int cvmx_l2c_set_hw_way_partition(uint32_t mask)
  125. {
  126. uint32_t valid_mask;
  127. valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
  128. mask &= valid_mask;
  129. /* A UMSK setting which blocks all L2C Ways is an error on some chips */
  130. if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
  131. return -1;
  132. if (OCTEON_IS_MODEL(OCTEON_CN63XX))
  133. cvmx_write_csr(CVMX_L2C_WPAR_IOBX(0), mask);
  134. else
  135. cvmx_write_csr(CVMX_L2C_SPAR4,
  136. (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
  137. return 0;
  138. }
  139. int cvmx_l2c_get_hw_way_partition(void)
  140. {
  141. if (OCTEON_IS_MODEL(OCTEON_CN63XX))
  142. return cvmx_read_csr(CVMX_L2C_WPAR_IOBX(0)) & 0xffff;
  143. else
  144. return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
  145. }
  146. void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
  147. uint32_t clear_on_read)
  148. {
  149. if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
  150. union cvmx_l2c_pfctl pfctl;
  151. pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
  152. switch (counter) {
  153. case 0:
  154. pfctl.s.cnt0sel = event;
  155. pfctl.s.cnt0ena = 1;
  156. pfctl.s.cnt0rdclr = clear_on_read;
  157. break;
  158. case 1:
  159. pfctl.s.cnt1sel = event;
  160. pfctl.s.cnt1ena = 1;
  161. pfctl.s.cnt1rdclr = clear_on_read;
  162. break;
  163. case 2:
  164. pfctl.s.cnt2sel = event;
  165. pfctl.s.cnt2ena = 1;
  166. pfctl.s.cnt2rdclr = clear_on_read;
  167. break;
  168. case 3:
  169. default:
  170. pfctl.s.cnt3sel = event;
  171. pfctl.s.cnt3ena = 1;
  172. pfctl.s.cnt3rdclr = clear_on_read;
  173. break;
  174. }
  175. cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
  176. } else {
  177. union cvmx_l2c_tadx_prf l2c_tadx_prf;
  178. int tad;
  179. cvmx_dprintf("L2C performance counter events are different for this chip, mapping 'event' to cvmx_l2c_tad_event_t\n");
  180. if (clear_on_read)
  181. cvmx_dprintf("L2C counters don't support clear on read for this chip\n");
  182. l2c_tadx_prf.u64 = cvmx_read_csr(CVMX_L2C_TADX_PRF(0));
  183. switch (counter) {
  184. case 0:
  185. l2c_tadx_prf.s.cnt0sel = event;
  186. break;
  187. case 1:
  188. l2c_tadx_prf.s.cnt1sel = event;
  189. break;
  190. case 2:
  191. l2c_tadx_prf.s.cnt2sel = event;
  192. break;
  193. default:
  194. case 3:
  195. l2c_tadx_prf.s.cnt3sel = event;
  196. break;
  197. }
  198. for (tad = 0; tad < CVMX_L2C_TADS; tad++)
  199. cvmx_write_csr(CVMX_L2C_TADX_PRF(tad),
  200. l2c_tadx_prf.u64);
  201. }
  202. }
  203. uint64_t cvmx_l2c_read_perf(uint32_t counter)
  204. {
  205. switch (counter) {
  206. case 0:
  207. if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
  208. return cvmx_read_csr(CVMX_L2C_PFC0);
  209. else {
  210. uint64_t counter = 0;
  211. int tad;
  212. for (tad = 0; tad < CVMX_L2C_TADS; tad++)
  213. counter += cvmx_read_csr(CVMX_L2C_TADX_PFC0(tad));
  214. return counter;
  215. }
  216. case 1:
  217. if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
  218. return cvmx_read_csr(CVMX_L2C_PFC1);
  219. else {
  220. uint64_t counter = 0;
  221. int tad;
  222. for (tad = 0; tad < CVMX_L2C_TADS; tad++)
  223. counter += cvmx_read_csr(CVMX_L2C_TADX_PFC1(tad));
  224. return counter;
  225. }
  226. case 2:
  227. if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
  228. return cvmx_read_csr(CVMX_L2C_PFC2);
  229. else {
  230. uint64_t counter = 0;
  231. int tad;
  232. for (tad = 0; tad < CVMX_L2C_TADS; tad++)
  233. counter += cvmx_read_csr(CVMX_L2C_TADX_PFC2(tad));
  234. return counter;
  235. }
  236. case 3:
  237. default:
  238. if (OCTEON_IS_MODEL(OCTEON_CN5XXX) || OCTEON_IS_MODEL(OCTEON_CN3XXX))
  239. return cvmx_read_csr(CVMX_L2C_PFC3);
  240. else {
  241. uint64_t counter = 0;
  242. int tad;
  243. for (tad = 0; tad < CVMX_L2C_TADS; tad++)
  244. counter += cvmx_read_csr(CVMX_L2C_TADX_PFC3(tad));
  245. return counter;
  246. }
  247. }
  248. }
  249. /**
  250. * @INTERNAL
  251. * Helper function use to fault in cache lines for L2 cache locking
  252. *
  253. * @addr: Address of base of memory region to read into L2 cache
  254. * @len: Length (in bytes) of region to fault in
  255. */
  256. static void fault_in(uint64_t addr, int len)
  257. {
  258. char *ptr;
  259. /*
  260. * Adjust addr and length so we get all cache lines even for
  261. * small ranges spanning two cache lines.
  262. */
  263. len += addr & CVMX_CACHE_LINE_MASK;
  264. addr &= ~CVMX_CACHE_LINE_MASK;
  265. ptr = cvmx_phys_to_ptr(addr);
  266. /*
  267. * Invalidate L1 cache to make sure all loads result in data
  268. * being in L2.
  269. */
  270. CVMX_DCACHE_INVALIDATE;
  271. while (len > 0) {
  272. ACCESS_ONCE(*ptr);
  273. len -= CVMX_CACHE_LINE_SIZE;
  274. ptr += CVMX_CACHE_LINE_SIZE;
  275. }
  276. }
/**
 * Lock the cache line containing the given physical address into L2.
 *
 * @addr: Physical address of the line to lock
 *
 * Returns: on CN63XX, the way the line was locked into (>= 0), or -1
 * if the line could not be found or its lock bit did not stick; on
 * older chips, 0 on success or 1 if the hardware reported a lock
 * error.
 * NOTE(review): the two model families use different return
 * conventions; callers that sum return values (see
 * cvmx_l2c_lock_mem_region) should account for this.
 */
int cvmx_l2c_lock_line(uint64_t addr)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		int shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
		uint64_t assoc = cvmx_l2c_get_num_assoc();
		uint64_t tag = addr >> shift;
		uint64_t index = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, cvmx_l2c_address_to_index(addr) << CVMX_L2C_IDX_ADDR_SHIFT);
		uint64_t way;
		union cvmx_l2c_tadx_tag l2c_tadx_tag;

		/* Lock the line with the dedicated cache instruction */
		CVMX_CACHE_LCKL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, addr), 0);

		/*
		 * Make sure we were able to lock the line: scan every
		 * way of the set for a valid tag matching addr.
		 */
		for (way = 0; way < assoc; way++) {
			CVMX_CACHE_LTGL2I(index | (way << shift), 0);
			/* make sure CVMX_L2C_TADX_TAG is updated */
			CVMX_SYNC;
			l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
			if (l2c_tadx_tag.s.valid && l2c_tadx_tag.s.tag == tag)
				break;
		}

		/* Check if a valid line is found */
		if (way >= assoc) {
			/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: line not found for locking at 0x%llx address\n", (unsigned long long)addr); */
			return -1;
		}

		/* Check if lock bit is not set */
		if (!l2c_tadx_tag.s.lock) {
			/* cvmx_dprintf("ERROR: cvmx_l2c_lock_line: Not able to lock at 0x%llx address\n", (unsigned long long)addr); */
			return -1;
		}
		return way;
	} else {
		int retval = 0;
		union cvmx_l2c_dbg l2cdbg;
		union cvmx_l2c_lckbase lckbase;
		union cvmx_l2c_lckoff lckoff;
		union cvmx_l2t_err l2t_err;

		/* Only one core may drive the L2C debug interface */
		cvmx_spinlock_lock(&cvmx_l2c_spinlock);

		l2cdbg.u64 = 0;
		lckbase.u64 = 0;
		lckoff.u64 = 0;

		/* Clear l2t error bits if set (write-1-to-clear) */
		l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
		l2t_err.s.lckerr = 1;
		l2t_err.s.lckerr2 = 1;
		cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);

		addr &= ~CVMX_CACHE_LINE_MASK;

		/* Set this core as debug core */
		l2cdbg.s.ppnum = cvmx_get_core_num();
		CVMX_SYNC;
		cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
		/* Read back to make sure the write has landed */
		cvmx_read_csr(CVMX_L2C_DBG);

		lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
		cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
		cvmx_read_csr(CVMX_L2C_LCKOFF);

		/*
		 * If index aliasing is enabled, undo the aliasing so
		 * the lock base register sees the raw line address.
		 */
		if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
			int alias_shift = CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
			uint64_t addr_tmp = addr ^ (addr & ((1 << alias_shift) - 1)) >> CVMX_L2_SET_BITS;
			lckbase.s.lck_base = addr_tmp >> 7;
		} else {
			lckbase.s.lck_base = addr >> 7;
		}

		/* Arm the lock window, then touch the line to lock it */
		lckbase.s.lck_ena = 1;
		cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
		/* Make sure it gets there */
		cvmx_read_csr(CVMX_L2C_LCKBASE);

		fault_in(addr, CVMX_CACHE_LINE_SIZE);

		/* Disarm the lock window */
		lckbase.s.lck_ena = 0;
		cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
		/* Make sure it gets there */
		cvmx_read_csr(CVMX_L2C_LCKBASE);

		/* Stop being debug core */
		cvmx_write_csr(CVMX_L2C_DBG, 0);
		cvmx_read_csr(CVMX_L2C_DBG);

		/* The hardware reports lock failures via L2T_ERR */
		l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
		if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
			retval = 1; /* We were unable to lock the line */

		cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
		return retval;
	}
}
  357. int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
  358. {
  359. int retval = 0;
  360. /* Round start/end to cache line boundaries */
  361. len += start & CVMX_CACHE_LINE_MASK;
  362. start &= ~CVMX_CACHE_LINE_MASK;
  363. len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
  364. while (len) {
  365. retval += cvmx_l2c_lock_line(start);
  366. start += CVMX_CACHE_LINE_SIZE;
  367. len -= CVMX_CACHE_LINE_SIZE;
  368. }
  369. return retval;
  370. }
  371. void cvmx_l2c_flush(void)
  372. {
  373. uint64_t assoc, set;
  374. uint64_t n_assoc, n_set;
  375. n_set = cvmx_l2c_get_num_sets();
  376. n_assoc = cvmx_l2c_get_num_assoc();
  377. if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
  378. uint64_t address;
  379. /* These may look like constants, but they aren't... */
  380. int assoc_shift = CVMX_L2C_TAG_ADDR_ALIAS_SHIFT;
  381. int set_shift = CVMX_L2C_IDX_ADDR_SHIFT;
  382. for (set = 0; set < n_set; set++) {
  383. for (assoc = 0; assoc < n_assoc; assoc++) {
  384. address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
  385. (assoc << assoc_shift) | (set << set_shift));
  386. CVMX_CACHE_WBIL2I(address, 0);
  387. }
  388. }
  389. } else {
  390. for (set = 0; set < n_set; set++)
  391. for (assoc = 0; assoc < n_assoc; assoc++)
  392. cvmx_l2c_flush_line(assoc, set);
  393. }
  394. }
  395. int cvmx_l2c_unlock_line(uint64_t address)
  396. {
  397. if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
  398. int assoc;
  399. union cvmx_l2c_tag tag;
  400. uint32_t tag_addr;
  401. uint32_t index = cvmx_l2c_address_to_index(address);
  402. tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
  403. /*
  404. * For 63XX, we can flush a line by using the physical
  405. * address directly, so finding the cache line used by
  406. * the address is only required to provide the proper
  407. * return value for the function.
  408. */
  409. for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
  410. tag = cvmx_l2c_get_tag(assoc, index);
  411. if (tag.s.V && (tag.s.addr == tag_addr)) {
  412. CVMX_CACHE_WBIL2(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, address), 0);
  413. return tag.s.L;
  414. }
  415. }
  416. } else {
  417. int assoc;
  418. union cvmx_l2c_tag tag;
  419. uint32_t tag_addr;
  420. uint32_t index = cvmx_l2c_address_to_index(address);
  421. /* Compute portion of address that is stored in tag */
  422. tag_addr = ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) & ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
  423. for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
  424. tag = cvmx_l2c_get_tag(assoc, index);
  425. if (tag.s.V && (tag.s.addr == tag_addr)) {
  426. cvmx_l2c_flush_line(assoc, index);
  427. return tag.s.L;
  428. }
  429. }
  430. }
  431. return 0;
  432. }
  433. int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
  434. {
  435. int num_unlocked = 0;
  436. /* Round start/end to cache line boundaries */
  437. len += start & CVMX_CACHE_LINE_MASK;
  438. start &= ~CVMX_CACHE_LINE_MASK;
  439. len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
  440. while (len > 0) {
  441. num_unlocked += cvmx_l2c_unlock_line(start);
  442. start += CVMX_CACHE_LINE_SIZE;
  443. len -= CVMX_CACHE_LINE_SIZE;
  444. }
  445. return num_unlocked;
  446. }
/*
 * Internal l2c tag types. These are converted to a generic structure
 * that can be used on all chips.
 *
 * Each model stores the same V/D/L/U flag bits but a different width
 * physical-address field (and hence a different reserved width), so
 * there is one layout per chip family.  CN56XX shares the CN58XX
 * layout and CN52XX shares the CN31XX layout.
 */
union __cvmx_l2c_tag {
	uint64_t u64;
	struct cvmx_l2c_tag_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved:40;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:20;	/* Phys mem addr (33..14) */
#else
		uint64_t addr:20;	/* Phys mem addr (33..14) */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t L:1;		/* Line locked */
		uint64_t D:1;		/* Line dirty */
		uint64_t V:1;		/* Line valid */
		uint64_t reserved:40;
#endif
	} cn50xx;
	struct cvmx_l2c_tag_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved:41;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:19;	/* Phys mem addr (33..15) */
#else
		uint64_t addr:19;	/* Phys mem addr (33..15) */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t L:1;		/* Line locked */
		uint64_t D:1;		/* Line dirty */
		uint64_t V:1;		/* Line valid */
		uint64_t reserved:41;
#endif
	} cn30xx;
	struct cvmx_l2c_tag_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved:42;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:18;	/* Phys mem addr (33..16) */
#else
		uint64_t addr:18;	/* Phys mem addr (33..16) */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t L:1;		/* Line locked */
		uint64_t D:1;		/* Line dirty */
		uint64_t V:1;		/* Line valid */
		uint64_t reserved:42;
#endif
	} cn31xx;
	struct cvmx_l2c_tag_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved:43;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:17;	/* Phys mem addr (33..17) */
#else
		uint64_t addr:17;	/* Phys mem addr (33..17) */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t L:1;		/* Line locked */
		uint64_t D:1;		/* Line dirty */
		uint64_t V:1;		/* Line valid */
		uint64_t reserved:43;
#endif
	} cn38xx;
	struct cvmx_l2c_tag_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t reserved:44;
		uint64_t V:1;		/* Line valid */
		uint64_t D:1;		/* Line dirty */
		uint64_t L:1;		/* Line locked */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t addr:16;	/* Phys mem addr (33..18) */
#else
		uint64_t addr:16;	/* Phys mem addr (33..18) */
		uint64_t U:1;		/* Use, LRU eviction */
		uint64_t L:1;		/* Line locked */
		uint64_t D:1;		/* Line dirty */
		uint64_t V:1;		/* Line valid */
		uint64_t reserved:44;
#endif
	} cn58xx;
	struct cvmx_l2c_tag_cn58xx cn56xx;	/* 2048 sets */
	struct cvmx_l2c_tag_cn31xx cn52xx;	/* 512 sets */
};
/**
 * @INTERNAL
 * Function to read a L2C tag. This code make the current core
 * the 'debug core' for the L2. This code must only be executed by
 * 1 core at a time.
 *
 * @assoc: Association (way) of the tag to dump
 * @index: Index of the cacheline
 *
 * Returns The Octeon model specific tag structure. This is
 * translated by a wrapper function to a generic form that is
 * easier for applications to use.
 */
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
	/*
	 * In L2 debug mode a load from this address returns the tag
	 * instead of the data: byte offset 96 within the line at
	 * (index << 7).
	 */
	uint64_t debug_tag_addr = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, (index << 7) + 96);
	uint64_t core = cvmx_get_core_num();
	union __cvmx_l2c_tag tag_val;
	uint64_t dbg_addr = CVMX_L2C_DBG;
	unsigned long flags;
	union cvmx_l2c_dbg debug_val;

	debug_val.u64 = 0;
	/*
	 * For low core count parts, the core number is always small
	 * enough to stay in the correct field and not set any
	 * reserved bits.
	 */
	debug_val.s.ppnum = core;
	debug_val.s.l2t = 1;	/* Select tag (not data) reads */
	debug_val.s.set = assoc;

	local_irq_save(flags);
	/*
	 * Make sure core is quiet (no prefetches, etc.) before
	 * entering debug mode.
	 */
	CVMX_SYNC;
	/* Flush L1 to make sure debug load misses L1 */
	CVMX_DCACHE_INVALIDATE;

	/*
	 * The following must be done in assembly as when in debug
	 * mode all data loads from L2 return special debug data, not
	 * normal memory contents. Also, interrupts must be disabled,
	 * since if an interrupt occurs while in debug mode the ISR
	 * will get debug data from all its memory * reads instead of
	 * the contents of memory.
	 */
	asm volatile (
		".set push\n\t"
		".set mips64\n\t"
		".set noreorder\n\t"
		"sd    %[dbg_val], 0(%[dbg_addr])\n\t"	/* Enter debug mode, wait for store */
		"ld    $0, 0(%[dbg_addr])\n\t"
		"ld    %[tag_val], 0(%[tag_addr])\n\t"	/* Read L2C tag data */
		"sd    $0, 0(%[dbg_addr])\n\t"		/* Exit debug mode, wait for store */
		"ld    $0, 0(%[dbg_addr])\n\t"
		"cache 9, 0($0)\n\t"			/* Invalidate dcache to discard debug data */
		".set pop"
		: [tag_val] "=r" (tag_val)
		: [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
		: "memory");

	local_irq_restore(flags);

	return tag_val;
}
  604. union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
  605. {
  606. union cvmx_l2c_tag tag;
  607. tag.u64 = 0;
  608. if ((int)association >= cvmx_l2c_get_num_assoc()) {
  609. cvmx_dprintf("ERROR: cvmx_l2c_get_tag association out of range\n");
  610. return tag;
  611. }
  612. if ((int)index >= cvmx_l2c_get_num_sets()) {
  613. cvmx_dprintf("ERROR: cvmx_l2c_get_tag index out of range (arg: %d, max: %d)\n",
  614. (int)index, cvmx_l2c_get_num_sets());
  615. return tag;
  616. }
  617. if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
  618. union cvmx_l2c_tadx_tag l2c_tadx_tag;
  619. uint64_t address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
  620. (association << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
  621. (index << CVMX_L2C_IDX_ADDR_SHIFT));
  622. /*
  623. * Use L2 cache Index load tag cache instruction, as
  624. * hardware loads the virtual tag for the L2 cache
  625. * block with the contents of L2C_TAD0_TAG
  626. * register.
  627. */
  628. CVMX_CACHE_LTGL2I(address, 0);
  629. CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
  630. l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
  631. tag.s.V = l2c_tadx_tag.s.valid;
  632. tag.s.D = l2c_tadx_tag.s.dirty;
  633. tag.s.L = l2c_tadx_tag.s.lock;
  634. tag.s.U = l2c_tadx_tag.s.use;
  635. tag.s.addr = l2c_tadx_tag.s.tag;
  636. } else {
  637. union __cvmx_l2c_tag tmp_tag;
  638. /* __read_l2_tag is intended for internal use only */
  639. tmp_tag = __read_l2_tag(association, index);
  640. /*
  641. * Convert all tag structure types to generic version,
  642. * as it can represent all models.
  643. */
  644. if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
  645. tag.s.V = tmp_tag.cn58xx.V;
  646. tag.s.D = tmp_tag.cn58xx.D;
  647. tag.s.L = tmp_tag.cn58xx.L;
  648. tag.s.U = tmp_tag.cn58xx.U;
  649. tag.s.addr = tmp_tag.cn58xx.addr;
  650. } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
  651. tag.s.V = tmp_tag.cn38xx.V;
  652. tag.s.D = tmp_tag.cn38xx.D;
  653. tag.s.L = tmp_tag.cn38xx.L;
  654. tag.s.U = tmp_tag.cn38xx.U;
  655. tag.s.addr = tmp_tag.cn38xx.addr;
  656. } else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
  657. tag.s.V = tmp_tag.cn31xx.V;
  658. tag.s.D = tmp_tag.cn31xx.D;
  659. tag.s.L = tmp_tag.cn31xx.L;
  660. tag.s.U = tmp_tag.cn31xx.U;
  661. tag.s.addr = tmp_tag.cn31xx.addr;
  662. } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
  663. tag.s.V = tmp_tag.cn30xx.V;
  664. tag.s.D = tmp_tag.cn30xx.D;
  665. tag.s.L = tmp_tag.cn30xx.L;
  666. tag.s.U = tmp_tag.cn30xx.U;
  667. tag.s.addr = tmp_tag.cn30xx.addr;
  668. } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
  669. tag.s.V = tmp_tag.cn50xx.V;
  670. tag.s.D = tmp_tag.cn50xx.D;
  671. tag.s.L = tmp_tag.cn50xx.L;
  672. tag.s.U = tmp_tag.cn50xx.U;
  673. tag.s.addr = tmp_tag.cn50xx.addr;
  674. } else {
  675. cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
  676. }
  677. }
  678. return tag;
  679. }
  680. uint32_t cvmx_l2c_address_to_index(uint64_t addr)
  681. {
  682. uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
  683. int indxalias = 0;
  684. if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
  685. union cvmx_l2c_ctl l2c_ctl;
  686. l2c_ctl.u64 = cvmx_read_csr(CVMX_L2C_CTL);
  687. indxalias = !l2c_ctl.s.disidxalias;
  688. } else {
  689. union cvmx_l2c_cfg l2c_cfg;
  690. l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
  691. indxalias = l2c_cfg.s.idxalias;
  692. }
  693. if (indxalias) {
  694. if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
  695. uint32_t a_14_12 = (idx / (CVMX_L2C_MEMBANK_SELECT_SIZE/(1<<CVMX_L2C_IDX_ADDR_SHIFT))) & 0x7;
  696. idx ^= idx / cvmx_l2c_get_num_sets();
  697. idx ^= a_14_12;
  698. } else {
  699. idx ^= ((addr & CVMX_L2C_ALIAS_MASK) >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
  700. }
  701. }
  702. idx &= CVMX_L2C_IDX_MASK;
  703. return idx;
  704. }
  705. int cvmx_l2c_get_cache_size_bytes(void)
  706. {
  707. return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
  708. CVMX_CACHE_LINE_SIZE;
  709. }
  710. /**
  711. * Return log base 2 of the number of sets in the L2 cache
  712. * Returns
  713. */
  714. int cvmx_l2c_get_set_bits(void)
  715. {
  716. int l2_set_bits;
  717. if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
  718. l2_set_bits = 11; /* 2048 sets */
  719. else if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
  720. l2_set_bits = 10; /* 1024 sets */
  721. else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
  722. l2_set_bits = 9; /* 512 sets */
  723. else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
  724. l2_set_bits = 8; /* 256 sets */
  725. else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
  726. l2_set_bits = 7; /* 128 sets */
  727. else {
  728. cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
  729. l2_set_bits = 11; /* 2048 sets */
  730. }
  731. return l2_set_bits;
  732. }
  733. /* Return the number of sets in the L2 Cache */
  734. int cvmx_l2c_get_num_sets(void)
  735. {
  736. return 1 << cvmx_l2c_get_set_bits();
  737. }
  738. /* Return the number of associations in the L2 Cache */
  739. int cvmx_l2c_get_num_assoc(void)
  740. {
  741. int l2_assoc;
  742. if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
  743. OCTEON_IS_MODEL(OCTEON_CN52XX) ||
  744. OCTEON_IS_MODEL(OCTEON_CN58XX) ||
  745. OCTEON_IS_MODEL(OCTEON_CN50XX) ||
  746. OCTEON_IS_MODEL(OCTEON_CN38XX))
  747. l2_assoc = 8;
  748. else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
  749. l2_assoc = 16;
  750. else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
  751. OCTEON_IS_MODEL(OCTEON_CN30XX))
  752. l2_assoc = 4;
  753. else {
  754. cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
  755. l2_assoc = 8;
  756. }
  757. /* Check to see if part of the cache is disabled */
  758. if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
  759. union cvmx_mio_fus_dat3 mio_fus_dat3;
  760. mio_fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
  761. /*
  762. * cvmx_mio_fus_dat3.s.l2c_crip fuses map as follows
  763. * <2> will be not used for 63xx
  764. * <1> disables 1/2 ways
  765. * <0> disables 1/4 ways
  766. * They are cumulative, so for 63xx:
  767. * <1> <0>
  768. * 0 0 16-way 2MB cache
  769. * 0 1 12-way 1.5MB cache
  770. * 1 0 8-way 1MB cache
  771. * 1 1 4-way 512KB cache
  772. */
  773. if (mio_fus_dat3.s.l2c_crip == 3)
  774. l2_assoc = 4;
  775. else if (mio_fus_dat3.s.l2c_crip == 2)
  776. l2_assoc = 8;
  777. else if (mio_fus_dat3.s.l2c_crip == 1)
  778. l2_assoc = 12;
  779. } else {
  780. union cvmx_l2d_fus3 val;
  781. val.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
  782. /*
  783. * Using shifts here, as bit position names are
  784. * different for each model but they all mean the
  785. * same.
  786. */
  787. if ((val.u64 >> 35) & 0x1)
  788. l2_assoc = l2_assoc >> 2;
  789. else if ((val.u64 >> 34) & 0x1)
  790. l2_assoc = l2_assoc >> 1;
  791. }
  792. return l2_assoc;
  793. }
/**
 * Flush a line from the L2 cache
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @assoc: Association (or way) to flush
 * @index: Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
	/* Check the range of the index. */
	if (index > (uint32_t)cvmx_l2c_get_num_sets()) {
		cvmx_dprintf("ERROR: cvmx_l2c_flush_line index out of range.\n");
		return;
	}

	/* Check the range of association. */
	if (assoc > (uint32_t)cvmx_l2c_get_num_assoc()) {
		cvmx_dprintf("ERROR: cvmx_l2c_flush_line association out of range.\n");
		return;
	}

	if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		uint64_t address;
		/* Create the address based on index and association.
		 * Bits<20:17> select the way of the cache block involved in
		 * the operation
		 * Bits<16:7> of the effect address select the index
		 */
		address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
				(assoc << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) |
				(index << CVMX_L2C_IDX_ADDR_SHIFT));
		CVMX_CACHE_WBIL2I(address, 0);
	} else {
		/*
		 * Older chips flush via the debug interface: become
		 * the debug core, select (way, index) with finv set,
		 * then touch the line to force the writeback+invalidate.
		 */
		union cvmx_l2c_dbg l2cdbg;

		l2cdbg.u64 = 0;
		/* CN30XX has a single core; ppnum stays 0 there */
		if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
			l2cdbg.s.ppnum = cvmx_get_core_num();
		l2cdbg.s.finv = 1;	/* Flush/invalidate on next access */
		l2cdbg.s.set = assoc;

		/* Serialize: only one core may drive the debug interface */
		cvmx_spinlock_lock(&cvmx_l2c_spinlock);
		/*
		 * Enter debug mode, and make sure all other writes
		 * complete before we enter debug mode
		 */
		CVMX_SYNC;
		cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
		/* Read back to ensure the write has taken effect */
		cvmx_read_csr(CVMX_L2C_DBG);

		/* Touch the selected index to trigger the flush */
		CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
						    index * CVMX_CACHE_LINE_SIZE),
				       0);
		/* Exit debug mode */
		CVMX_SYNC;
		cvmx_write_csr(CVMX_L2C_DBG, 0);
		cvmx_read_csr(CVMX_L2C_DBG);
		cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
	}
}