cache-tauros2.c 6.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265
/*
 * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - PJ1 CPU Core Datasheet,
 *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
 * - PJ4 CPU Core Datasheet,
 *   Document ID MV-S105190-00, Rev 0.7, March 14 2008.
 */
  16. #include <linux/init.h>
  17. #include <asm/cacheflush.h>
  18. #include <asm/cp15.h>
  19. #include <asm/hardware/cache-tauros2.h>
/*
 * When Tauros2 is used on a CPU that supports the v7 hierarchical
 * cache operations, the cache handling code in proc-v7.S takes care
 * of everything, including handling DMA coherency.
 *
 * So, we only need to register outer cache operations here if we're
 * being used on a pre-v7 CPU, and we only need to build support for
 * outer cache operations into the kernel image if the kernel has been
 * configured to support a pre-v7 CPU.
 */
  30. #if __LINUX_ARM_ARCH__ < 7
/*
 * Low-level cache maintenance operations.
 */
  34. static inline void tauros2_clean_pa(unsigned long addr)
  35. {
  36. __asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
  37. }
  38. static inline void tauros2_clean_inv_pa(unsigned long addr)
  39. {
  40. __asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
  41. }
  42. static inline void tauros2_inv_pa(unsigned long addr)
  43. {
  44. __asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
  45. }
/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive.
 */
  52. #define CACHE_LINE_SIZE 32
  53. static void tauros2_inv_range(unsigned long start, unsigned long end)
  54. {
  55. /*
  56. * Clean and invalidate partial first cache line.
  57. */
  58. if (start & (CACHE_LINE_SIZE - 1)) {
  59. tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
  60. start = (start | (CACHE_LINE_SIZE - 1)) + 1;
  61. }
  62. /*
  63. * Clean and invalidate partial last cache line.
  64. */
  65. if (end & (CACHE_LINE_SIZE - 1)) {
  66. tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
  67. end &= ~(CACHE_LINE_SIZE - 1);
  68. }
  69. /*
  70. * Invalidate all full cache lines between 'start' and 'end'.
  71. */
  72. while (start < end) {
  73. tauros2_inv_pa(start);
  74. start += CACHE_LINE_SIZE;
  75. }
  76. dsb();
  77. }
  78. static void tauros2_clean_range(unsigned long start, unsigned long end)
  79. {
  80. start &= ~(CACHE_LINE_SIZE - 1);
  81. while (start < end) {
  82. tauros2_clean_pa(start);
  83. start += CACHE_LINE_SIZE;
  84. }
  85. dsb();
  86. }
  87. static void tauros2_flush_range(unsigned long start, unsigned long end)
  88. {
  89. start &= ~(CACHE_LINE_SIZE - 1);
  90. while (start < end) {
  91. tauros2_clean_inv_pa(start);
  92. start += CACHE_LINE_SIZE;
  93. }
  94. dsb();
  95. }
  96. #endif
  97. static inline u32 __init read_extra_features(void)
  98. {
  99. u32 u;
  100. __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
  101. return u;
  102. }
  103. static inline void __init write_extra_features(u32 u)
  104. {
  105. __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
  106. }
  107. static void __init disable_l2_prefetch(void)
  108. {
  109. u32 u;
  110. /*
  111. * Read the CPU Extra Features register and verify that the
  112. * Disable L2 Prefetch bit is set.
  113. */
  114. u = read_extra_features();
  115. if (!(u & 0x01000000)) {
  116. printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n");
  117. write_extra_features(u | 0x01000000);
  118. }
  119. }
  120. static inline int __init cpuid_scheme(void)
  121. {
  122. extern int processor_id;
  123. return !!((processor_id & 0x000f0000) == 0x000f0000);
  124. }
  125. static inline u32 __init read_mmfr3(void)
  126. {
  127. u32 mmfr3;
  128. __asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));
  129. return mmfr3;
  130. }
  131. static inline u32 __init read_actlr(void)
  132. {
  133. u32 actlr;
  134. __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
  135. return actlr;
  136. }
  137. static inline void __init write_actlr(u32 actlr)
  138. {
  139. __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
  140. }
  141. void __init tauros2_init(void)
  142. {
  143. extern int processor_id;
  144. char *mode;
  145. disable_l2_prefetch();
  146. #ifdef CONFIG_CPU_32v5
  147. if ((processor_id & 0xff0f0000) == 0x56050000) {
  148. u32 feat;
  149. /*
  150. * v5 CPUs with Tauros2 have the L2 cache enable bit
  151. * located in the CPU Extra Features register.
  152. */
  153. feat = read_extra_features();
  154. if (!(feat & 0x00400000)) {
  155. printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
  156. write_extra_features(feat | 0x00400000);
  157. }
  158. mode = "ARMv5";
  159. outer_cache.inv_range = tauros2_inv_range;
  160. outer_cache.clean_range = tauros2_clean_range;
  161. outer_cache.flush_range = tauros2_flush_range;
  162. }
  163. #endif
  164. #ifdef CONFIG_CPU_32v6
  165. /*
  166. * Check whether this CPU lacks support for the v7 hierarchical
  167. * cache ops. (PJ4 is in its v6 personality mode if the MMFR3
  168. * register indicates no support for the v7 hierarchical cache
  169. * ops.)
  170. */
  171. if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
  172. /*
  173. * When Tauros2 is used in an ARMv6 system, the L2
  174. * enable bit is in the ARMv6 ARM-mandated position
  175. * (bit [26] of the System Control Register).
  176. */
  177. if (!(get_cr() & 0x04000000)) {
  178. printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
  179. adjust_cr(0x04000000, 0x04000000);
  180. }
  181. mode = "ARMv6";
  182. outer_cache.inv_range = tauros2_inv_range;
  183. outer_cache.clean_range = tauros2_clean_range;
  184. outer_cache.flush_range = tauros2_flush_range;
  185. }
  186. #endif
  187. #ifdef CONFIG_CPU_32v7
  188. /*
  189. * Check whether this CPU has support for the v7 hierarchical
  190. * cache ops. (PJ4 is in its v7 personality mode if the MMFR3
  191. * register indicates support for the v7 hierarchical cache
  192. * ops.)
  193. *
  194. * (Although strictly speaking there may exist CPUs that
  195. * implement the v7 cache ops but are only ARMv6 CPUs (due to
  196. * not complying with all of the other ARMv7 requirements),
  197. * there are no real-life examples of Tauros2 being used on
  198. * such CPUs as of yet.)
  199. */
  200. if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
  201. u32 actlr;
  202. /*
  203. * When Tauros2 is used in an ARMv7 system, the L2
  204. * enable bit is located in the Auxiliary System Control
  205. * Register (which is the only register allowed by the
  206. * ARMv7 spec to contain fine-grained cache control bits).
  207. */
  208. actlr = read_actlr();
  209. if (!(actlr & 0x00000002)) {
  210. printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
  211. write_actlr(actlr | 0x00000002);
  212. }
  213. mode = "ARMv7";
  214. }
  215. #endif
  216. if (mode == NULL) {
  217. printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
  218. return;
  219. }
  220. printk(KERN_INFO "Tauros2: L2 cache support initialised "
  221. "in %s mode.\n", mode);
  222. }