@ linux/arch/arm/mm/cache-v6.S — ARMv6 cache maintenance routines
  1. /*
  2. * linux/arch/arm/mm/cache-v6.S
  3. *
  4. * Copyright (C) 2001 Deep Blue Solutions Ltd.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This is the "shell" of the ARMv6 processor support.
  11. */
  12. #include <linux/linkage.h>
  13. #include <linux/init.h>
  14. #include <asm/assembler.h>
  15. #include <asm/unwind.h>
  16. #include "proc-macros.S"
  17. #define HARVARD_CACHE
  18. #define CACHE_LINE_SIZE 32
  19. #define D_CACHE_LINE_SIZE 32
  20. #define BTB_FLUSH_SIZE 8
  21. /*
  22. * v6_flush_icache_all()
  23. *
  24. * Flush the whole I-cache.
  25. *
  26. * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
  27. * This erratum is present in 1136, 1156 and 1176. It does not affect the
  28. * MPCore.
  29. *
  30. * Registers:
  31. * r0 - set to 0
  32. * r1 - corrupted
  33. */
  34. ENTRY(v6_flush_icache_all)
  35. mov r0, #0
  36. #ifdef CONFIG_ARM_ERRATA_411920
  37. mrs r1, cpsr
  38. cpsid ifa @ disable interrupts
  39. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  40. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  41. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  42. mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache
  43. msr cpsr_cx, r1 @ restore interrupts
  44. .rept 11 @ ARM Ltd recommends at least
  45. nop @ 11 NOPs
  46. .endr
  47. #else
  48. mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
  49. #endif
  50. mov pc, lr
  51. ENDPROC(v6_flush_icache_all)
  52. /*
  53. * v6_flush_cache_all()
  54. *
  55. * Flush the entire cache.
  56. *
  57. * It is assumed that:
  58. */
  59. ENTRY(v6_flush_kern_cache_all)
  60. mov r0, #0
  61. #ifdef HARVARD_CACHE
  62. mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
  63. #ifndef CONFIG_ARM_ERRATA_411920
  64. mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
  65. #else
  66. b v6_flush_icache_all
  67. #endif
  68. #else
  69. mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
  70. #endif
  71. mov pc, lr
  72. /*
  73. * v6_flush_cache_all()
  74. *
  75. * Flush all TLB entries in a particular address space
  76. *
  77. * - mm - mm_struct describing address space
  78. */
  79. ENTRY(v6_flush_user_cache_all)
  80. /*FALLTHROUGH*/
  81. /*
  82. * v6_flush_cache_range(start, end, flags)
  83. *
  84. * Flush a range of TLB entries in the specified address space.
  85. *
  86. * - start - start address (may not be aligned)
  87. * - end - end address (exclusive, may not be aligned)
  88. * - flags - vm_area_struct flags describing address space
  89. *
  90. * It is assumed that:
  91. * - we have a VIPT cache.
  92. */
  93. ENTRY(v6_flush_user_cache_range)
  94. mov pc, lr
  95. /*
  96. * v6_coherent_kern_range(start,end)
  97. *
  98. * Ensure that the I and D caches are coherent within specified
  99. * region. This is typically used when code has been written to
  100. * a memory region, and will be executed.
  101. *
  102. * - start - virtual start address of region
  103. * - end - virtual end address of region
  104. *
  105. * It is assumed that:
  106. * - the Icache does not read data from the write buffer
  107. */
  108. ENTRY(v6_coherent_kern_range)
  109. /* FALLTHROUGH */
  110. /*
  111. * v6_coherent_user_range(start,end)
  112. *
  113. * Ensure that the I and D caches are coherent within specified
  114. * region. This is typically used when code has been written to
  115. * a memory region, and will be executed.
  116. *
  117. * - start - virtual start address of region
  118. * - end - virtual end address of region
  119. *
  120. * It is assumed that:
  121. * - the Icache does not read data from the write buffer
  122. */
  123. ENTRY(v6_coherent_user_range)
  124. UNWIND(.fnstart )
  125. #ifdef HARVARD_CACHE
  126. bic r0, r0, #CACHE_LINE_SIZE - 1
  127. 1:
  128. USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line
  129. add r0, r0, #CACHE_LINE_SIZE
  130. 2:
  131. cmp r0, r1
  132. blo 1b
  133. #endif
  134. mov r0, #0
  135. #ifdef HARVARD_CACHE
  136. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  137. #ifndef CONFIG_ARM_ERRATA_411920
  138. mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
  139. #else
  140. b v6_flush_icache_all
  141. #endif
  142. #else
  143. mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
  144. #endif
  145. mov pc, lr
  146. /*
  147. * Fault handling for the cache operation above. If the virtual address in r0
  148. * isn't mapped, just try the next page.
  149. */
  150. 9001:
  151. mov r0, r0, lsr #12
  152. mov r0, r0, lsl #12
  153. add r0, r0, #4096
  154. b 2b
  155. UNWIND(.fnend )
  156. ENDPROC(v6_coherent_user_range)
  157. ENDPROC(v6_coherent_kern_range)
  158. /*
  159. * v6_flush_kern_dcache_area(void *addr, size_t size)
  160. *
  161. * Ensure that the data held in the page kaddr is written back
  162. * to the page in question.
  163. *
  164. * - addr - kernel address
  165. * - size - region size
  166. */
  167. ENTRY(v6_flush_kern_dcache_area)
  168. add r1, r0, r1
  169. bic r0, r0, #D_CACHE_LINE_SIZE - 1
  170. 1:
  171. #ifdef HARVARD_CACHE
  172. mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
  173. #else
  174. mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate unified line
  175. #endif
  176. add r0, r0, #D_CACHE_LINE_SIZE
  177. cmp r0, r1
  178. blo 1b
  179. #ifdef HARVARD_CACHE
  180. mov r0, #0
  181. mcr p15, 0, r0, c7, c10, 4
  182. #endif
  183. mov pc, lr
  184. /*
  185. * v6_dma_inv_range(start,end)
  186. *
  187. * Invalidate the data cache within the specified region; we will
  188. * be performing a DMA operation in this region and we want to
  189. * purge old data in the cache.
  190. *
  191. * - start - virtual start address of region
  192. * - end - virtual end address of region
  193. */
  194. v6_dma_inv_range:
  195. #ifdef CONFIG_DMA_CACHE_RWFO
  196. ldrb r2, [r0] @ read for ownership
  197. strb r2, [r0] @ write for ownership
  198. #endif
  199. tst r0, #D_CACHE_LINE_SIZE - 1
  200. bic r0, r0, #D_CACHE_LINE_SIZE - 1
  201. #ifdef HARVARD_CACHE
  202. mcrne p15, 0, r0, c7, c10, 1 @ clean D line
  203. #else
  204. mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
  205. #endif
  206. tst r1, #D_CACHE_LINE_SIZE - 1
  207. #ifdef CONFIG_DMA_CACHE_RWFO
  208. ldrneb r2, [r1, #-1] @ read for ownership
  209. strneb r2, [r1, #-1] @ write for ownership
  210. #endif
  211. bic r1, r1, #D_CACHE_LINE_SIZE - 1
  212. #ifdef HARVARD_CACHE
  213. mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
  214. #else
  215. mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
  216. #endif
  217. 1:
  218. #ifdef HARVARD_CACHE
  219. mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
  220. #else
  221. mcr p15, 0, r0, c7, c7, 1 @ invalidate unified line
  222. #endif
  223. add r0, r0, #D_CACHE_LINE_SIZE
  224. cmp r0, r1
  225. #ifdef CONFIG_DMA_CACHE_RWFO
  226. ldrlo r2, [r0] @ read for ownership
  227. strlo r2, [r0] @ write for ownership
  228. #endif
  229. blo 1b
  230. mov r0, #0
  231. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  232. mov pc, lr
  233. /*
  234. * v6_dma_clean_range(start,end)
  235. * - start - virtual start address of region
  236. * - end - virtual end address of region
  237. */
  238. v6_dma_clean_range:
  239. bic r0, r0, #D_CACHE_LINE_SIZE - 1
  240. 1:
  241. #ifdef CONFIG_DMA_CACHE_RWFO
  242. ldr r2, [r0] @ read for ownership
  243. #endif
  244. #ifdef HARVARD_CACHE
  245. mcr p15, 0, r0, c7, c10, 1 @ clean D line
  246. #else
  247. mcr p15, 0, r0, c7, c11, 1 @ clean unified line
  248. #endif
  249. add r0, r0, #D_CACHE_LINE_SIZE
  250. cmp r0, r1
  251. blo 1b
  252. mov r0, #0
  253. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  254. mov pc, lr
  255. /*
  256. * v6_dma_flush_range(start,end)
  257. * - start - virtual start address of region
  258. * - end - virtual end address of region
  259. */
  260. ENTRY(v6_dma_flush_range)
  261. #ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
  262. sub r2, r1, r0
  263. cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
  264. bhi v6_dma_flush_dcache_all
  265. #endif
  266. #ifdef CONFIG_DMA_CACHE_RWFO
  267. ldrb r2, [r0] @ read for ownership
  268. strb r2, [r0] @ write for ownership
  269. #endif
  270. bic r0, r0, #D_CACHE_LINE_SIZE - 1
  271. 1:
  272. #ifdef HARVARD_CACHE
  273. mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
  274. #else
  275. mcr p15, 0, r0, c7, c15, 1 @ clean & invalidate line
  276. #endif
  277. add r0, r0, #D_CACHE_LINE_SIZE
  278. cmp r0, r1
  279. #ifdef CONFIG_DMA_CACHE_RWFO
  280. ldrlob r2, [r0] @ read for ownership
  281. strlob r2, [r0] @ write for ownership
  282. #endif
  283. blo 1b
  284. mov r0, #0
  285. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  286. mov pc, lr
  287. #ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
  288. v6_dma_flush_dcache_all:
  289. mov r0, #0
  290. #ifdef HARVARD_CACHE
  291. mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate
  292. #else
  293. mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
  294. #endif
  295. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
  296. mov pc, lr
  297. #endif
  298. /*
  299. * dma_map_area(start, size, dir)
  300. * - start - kernel virtual start address
  301. * - size - size of region
  302. * - dir - DMA direction
  303. */
  304. ENTRY(v6_dma_map_area)
  305. add r1, r1, r0
  306. teq r2, #DMA_FROM_DEVICE
  307. beq v6_dma_inv_range
  308. #ifndef CONFIG_DMA_CACHE_RWFO
  309. b v6_dma_clean_range
  310. #else
  311. teq r2, #DMA_TO_DEVICE
  312. beq v6_dma_clean_range
  313. b v6_dma_flush_range
  314. #endif
  315. ENDPROC(v6_dma_map_area)
  316. /*
  317. * dma_unmap_area(start, size, dir)
  318. * - start - kernel virtual start address
  319. * - size - size of region
  320. * - dir - DMA direction
  321. */
  322. ENTRY(v6_dma_unmap_area)
  323. #ifndef CONFIG_DMA_CACHE_RWFO
  324. add r1, r1, r0
  325. teq r2, #DMA_TO_DEVICE
  326. bne v6_dma_inv_range
  327. #endif
  328. mov pc, lr
  329. ENDPROC(v6_dma_unmap_area)
  330. .globl v6_flush_kern_cache_louis
  331. .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all
  332. __INITDATA
  333. @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
  334. define_cache_functions v6