/*
 * Copyright (C) 2015-2016 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define    UNIPHIER_SSCC_BST			BIT(20)	/* UCWG burst read */
#define    UNIPHIER_SSCC_ACT			BIT(19)	/* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG			BIT(18)	/* WT gathering on */
#define    UNIPHIER_SSCC_PRD			BIT(17)	/* enable pre-fetch */
#define    UNIPHIER_SSCC_ON			BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define    UNIPHIER_SSCOQM_CE			BIT(15)	/* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE		BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE		BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF			BIT(2)
#define    UNIPHIER_SSCOLPQS_EST		BIT(1)
#define    UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * struct uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base address of active way control registers
 * @way_present_mask: each bit specifies if the way is present
 * @way_locked_mask: each bit specifies if the way is locked
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	void __iomem *way_ctrl_base;
	u32 way_present_mask;
	u32 way_locked_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic.  Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another operation is issued,
	 * the new one is stored in the queue.  The controller performs one
	 * operation after another.  If the queue is full, the status register,
	 * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
	 * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
	 * different instances for each CPU, i.e. each CPU can track the status
	 * of the maintenance operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
	 * SSCOQWN}, are shared between multiple CPUs, but the hardware still
	 * guarantees the registration sequence is atomic; write accesses to
	 * them are arbitrated by the hardware.  The first accessor to the
	 * register, UNIPHIER_SSCOQM, holds the access right and it is released
	 * by reading the status register, UNIPHIER_SSCOPPQSEF.  While one CPU
	 * is holding the access right, other CPUs fail to register operations.
	 * One CPU should not hold the access right for a long time, so local
	 * IRQs should be disabled while the following sequence runs.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}
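
/*
 * Run a cache maintenance operation over all entries of a particular level,
 * then issue a sync point so the operation has taken effect on return.
 */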
static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}
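
/*
 * Run a cache maintenance operation over the address range [start, end) of a
 * particular level.  The range is issued in chunks of at most
 * range_op_max_size bytes; a size within one cache line of wrapping around
 * (which would overflow the alignment below) falls back to an "all entries"
 * operation instead.
 */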
static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/* this means a cache operation for the whole address range */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, data->line_size);

	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					      UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}
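
/* enable or disable a particular cache level via its control register */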
static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}
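
/*
 * Lock the given ways of a particular cache level.  The active way control
 * registers are instantiated per CPU, so the mask of unlocked (i.e. active)
 * ways is written for every possible CPU.
 */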
static void __init __uniphier_cache_set_locked_ways(
					struct uniphier_cache_data *data,
					u32 way_mask)
{
	unsigned int cpu;

	data->way_locked_mask = way_mask & data->way_present_mask;

	for_each_possible_cpu(cpu)
		writel_relaxed(~data->way_locked_mask & data->way_present_mask,
			       data->way_ctrl_base + 4 * cpu);
}
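
/*
 * The helpers below apply a maintenance operation to every level of the
 * outer cache hierarchy, using the per-level functions above.
 */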
static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}
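
/* disable all levels in reverse order of initialization, then flush them */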
static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}
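
/* invalidate all levels, then enable each of them with no ways locked */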
static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_locked_ways(data, 0);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{ .compatible = "socionext,uniphier-system-cache" },
	{ /* sentinel */ }
};
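
/*
 * Parse one cache node, map its registers, and recurse into the next level
 * found via of_find_next_cache_node(), if any.  @cache_level tracks the
 * level currently being initialized and is advanced as the recursion
 * descends.
 */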
static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	data->way_present_mask =
		((u32)1 << cache_size / data->nsets / data->line_size) - 1;

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->way_ctrl_base = data->ctrl_base + 0xc00;

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of range operation is limited to (1 << 22) or less
		 * for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;

		/*
		 * Unfortunately, the offset address of active way control base
		 * varies from SoC to SoC.
		 */
		switch (revision) {
		case 0x11:	/* sLD3 */
			data->way_ctrl_base = data->ctrl_base + 0x870;
			break;
		case 0x12:	/* LD4 */
		case 0x16:	/* sld8 */
			data->way_ctrl_base = data->ctrl_base + 0x840;
			break;
		default:
			break;
		}
	}
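
	/*
	 * If no limit was set above, range_op_max_size is still zero here and
	 * the u32 subtraction wraps around to a huge value, which effectively
	 * means no size limit for a single range operation.
	 */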
	data->range_op_max_size -= data->line_size;

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */

	/*
	 * OK, this level has been successfully initialized.  Look for the next
	 * level cache.  Do not roll back even if the initialization of the
	 * next level cache fails because we want to continue with available
	 * cache levels.
	 */
	next_np = of_find_next_cache_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;
err:
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}
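
/*
 * Set up the whole outer cache hierarchy, starting from the L2 node in the
 * device tree, then register the outer_cache hooks and enable the caches.
 */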
int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);
	if (ret) {
		/*
		 * Error out only if the L2 initialization fails.  Continue
		 * with any error on L3 or outer because they are optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}