/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>
#include <asm/pvr.h>
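
/*
 * Cache enable/disable helpers. The *_msr variants rely on the CPU
 * implementing the msrset/msrclr instructions; the *_nomsr variants do a
 * read-modify-write of rmsr via mfs/mts, with r12 as a scratch register.
 */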
static inline void __enable_icache_msr(void)
{
        __asm__ __volatile__ ("msrset r0, %0;"
                        "nop;"
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __disable_icache_msr(void)
{
        __asm__ __volatile__ ("msrclr r0, %0;"
                        "nop;"
                        : : "i" (MSR_ICE) : "memory");
}

static inline void __enable_dcache_msr(void)
{
        __asm__ __volatile__ ("msrset r0, %0;"
                        "nop;"
                        : : "i" (MSR_DCE) : "memory");
}

static inline void __disable_dcache_msr(void)
{
        __asm__ __volatile__ ("msrclr r0, %0;"
                        "nop;"
                        : : "i" (MSR_DCE) : "memory");
}

static inline void __enable_icache_nomsr(void)
{
        __asm__ __volatile__ ("mfs r12, rmsr;"
                        "nop;"
                        "ori r12, r12, %0;"
                        "mts rmsr, r12;"
                        "nop;"
                        : : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __disable_icache_nomsr(void)
{
        __asm__ __volatile__ ("mfs r12, rmsr;"
                        "nop;"
                        "andi r12, r12, ~%0;"
                        "mts rmsr, r12;"
                        "nop;"
                        : : "i" (MSR_ICE) : "memory", "r12");
}

static inline void __enable_dcache_nomsr(void)
{
        __asm__ __volatile__ ("mfs r12, rmsr;"
                        "nop;"
                        "ori r12, r12, %0;"
                        "mts rmsr, r12;"
                        "nop;"
                        : : "i" (MSR_DCE) : "memory", "r12");
}

static inline void __disable_dcache_nomsr(void)
{
        __asm__ __volatile__ ("mfs r12, rmsr;"
                        "nop;"
                        "andi r12, r12, ~%0;"
                        "mts rmsr, r12;"
                        "nop;"
                        : : "i" (MSR_DCE) : "memory", "r12");
}

/* Helper macro for computing the limits of cache range loops
 *
 * The end address can be unaligned, which is fine for the C implementation;
 * the ASM implementation aligns it in the ASM macros.
 */
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size)   \
do {                                                                    \
        int align = ~(cache_line_length - 1);                           \
        end = min(start + cache_size, end);                             \
        start &= align;                                                 \
} while (0)
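
/*
 * Worked example (hypothetical values): with start = 0x1007, end = 0x9000,
 * cache_line_length = 32 and cache_size = 0x8000, end is clamped to
 * min(0x1007 + 0x8000, 0x9000) = 0x9000 and start is aligned down to
 * 0x1000, so a range loop never walks more than one cache's worth of lines.
 */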

/*
 * Helper macro to loop over the specified cache_size/line_length and
 * execute 'op' on each cacheline
 */
#define CACHE_ALL_LOOP(cache_size, line_length, op)                     \
do {                                                                    \
        unsigned int len = cache_size - line_length;                    \
        int step = -line_length;                                        \
        WARN_ON(step >= 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1: " #op " %0, r0;"                     \
                                "bgtid %0, 1b;"                         \
                                "addk %0, %0, %1;"                      \
                                : : "r" (len), "r" (step)               \
                                : "memory");                            \
} while (0)
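
/*
 * Roughly equivalent C for the loop above (a sketch; bgtid tests the old
 * offset and its delay slot performs the decrement, so the final off == 0
 * iteration is still executed):
 *
 *      for (int off = cache_size - line_length; off >= 0; off -= line_length)
 *              op(off, r0);
 */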

/* Used for wdc.flush/clear, which can use rB for the offset; that is not
 * possible for plain wdc or wic.
 *
 * The start address is cache aligned. The end address is not aligned: if
 * end is aligned, subtract one cacheline length because the next cacheline
 * must not be flushed/invalidated; if it is not, align it down because the
 * whole line is flushed/invalidated anyway.
 */
#define CACHE_RANGE_LOOP_2(start, end, line_length, op)                 \
do {                                                                    \
        int step = -line_length;                                        \
        int align = ~(line_length - 1);                                 \
        int count;                                                      \
        end = ((end & align) == end) ? end - line_length : end & align; \
        count = end - start;                                            \
        WARN_ON(count < 0);                                             \
                                                                        \
        __asm__ __volatile__ (" 1: " #op " %0, %1;"                     \
                                "bgtid %1, 1b;"                         \
                                "addk %1, %1, %2;"                      \
                                : : "r" (start), "r" (count),           \
                                "r" (step) : "memory");                 \
} while (0)
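
/*
 * Roughly equivalent C (a sketch): the loop walks downward from the last
 * line to 'start', passing the aligned base plus a shrinking byte offset:
 *
 *      for (int count = end - start; count >= 0; count -= line_length)
 *              op(start, count);
 */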

/* Only the first parameter of OP is used - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op)                 \
do {                                                                    \
        unsigned int volatile temp = 0;                                 \
        unsigned int align = ~(line_length - 1);                        \
        end = ((end & align) == end) ? end - line_length : end & align; \
        WARN_ON(end < start);                                           \
                                                                        \
        __asm__ __volatile__ (" 1: " #op " %1, r0;"                     \
                                "cmpu %0, %1, %2;"                      \
                                "bgtid %0, 1b;"                         \
                                "addk %1, %1, %3;"                      \
                                : : "r" (temp), "r" (start), "r" (end), \
                                "r" (line_length) : "memory");          \
} while (0)
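
/*
 * Roughly equivalent C (a sketch): after the adjustment above, 'end' is
 * the address of the last line, so the walk is inclusive of both bounds:
 *
 *      for (unsigned long addr = start; addr <= end; addr += line_length)
 *              op(addr, r0);
 */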

#define ASM_LOOP
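
/*
 * With ASM_LOOP defined, the assembly loop macros above are used; removing
 * the define falls back to the plain C loops in the #else branches below.
 */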

static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_msr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic %0, r0;"
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_range_nomsr_irq(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);

        local_irq_save(flags);
        __disable_icache_nomsr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic %0, r0;"
                                : : "r" (i));
#endif
        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_range_noirq(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.icache_line_length, cpuinfo.icache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
#else
        for (i = start; i < end; i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic %0, r0;"
                                : : "r" (i));
#endif
}

static void __flush_icache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                        i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic %0, r0;"
                                : : "r" (i));
#endif
        __enable_icache_msr();
        local_irq_restore(flags);
}

static void __flush_icache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_icache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                        i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic %0, r0;"
                                : : "r" (i));
#endif
        __enable_icache_nomsr();
        local_irq_restore(flags);
}

static void __flush_icache_all_noirq(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
#else
        for (i = 0; i < cpuinfo.icache_size;
                        i += cpuinfo.icache_line_length)
                __asm__ __volatile__ ("wic %0, r0;"
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_all_msr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_msr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                : : "r" (i));
#endif
        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_nomsr_irq(void)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);

        local_irq_save(flags);
        __disable_dcache_nomsr();
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                : : "r" (i));
#endif
        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_all_noirq_wt(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                : : "r" (i));
#endif
}

/*
 * FIXME This is blind invalidation, as expected, but it can't be called
 * for the noMMU case in microblaze_cache_init below.
 *
 * MS: the noMMU kernel won't boot if a simple wdc is used.
 * The likely reason is that data the kernel still needs gets discarded.
 */
static void __invalidate_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                        wdc);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_wb(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.clear %0, r0;"
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
                                unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                : : "r" (i));
#endif
}

static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_msr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                : : "r" (i));
#endif
        __enable_dcache_msr();
        local_irq_restore(flags);
}

static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
                                unsigned long end)
{
        unsigned long flags;
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);

        local_irq_save(flags);
        __disable_dcache_nomsr();
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc %0, r0;"
                                : : "r" (i));
#endif
        __enable_dcache_nomsr();
        local_irq_restore(flags);
}

static void __flush_dcache_all_wb(void)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s\n", __func__);
#ifdef ASM_LOOP
        CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
                        wdc.flush);
#else
        for (i = 0; i < cpuinfo.dcache_size;
                        i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush %0, r0;"
                                : : "r" (i));
#endif
}

static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
{
#ifndef ASM_LOOP
        int i;
#endif
        pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
                        (unsigned int)start, (unsigned int)end);

        CACHE_LOOP_LIMITS(start, end,
                        cpuinfo.dcache_line_length, cpuinfo.dcache_size);
#ifdef ASM_LOOP
        CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
#else
        for (i = start; i < end; i += cpuinfo.dcache_line_length)
                __asm__ __volatile__ ("wdc.flush %0, r0;"
                                : : "r" (i));
#endif
}

/* Active cache model - one scache struct is shared by wb and wt caches */
struct scache *mbc;
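
/*
 * mbc is normally reached through the wrapper macros in asm/cacheflush.h
 * (e.g. enable_icache(), which dispatches to mbc->ie()) rather than
 * being called directly.
 */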

/* new wb cache model */
static const struct scache wb_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* Differs from wb_msr only in the ie, id, de, dd functions */
static const struct scache wb_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __flush_dcache_all_wb,
        .dflr = __flush_dcache_range_wb,
        .din = __invalidate_dcache_all_wb,
        .dinr = __invalidate_dcache_range_wb,
};

/* Old wt cache model which disables IRQs and turns the cache off */
static const struct scache wt_msr = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_msr_irq,
        .iflr = __flush_icache_range_msr_irq,
        .iin = __flush_icache_all_msr_irq,
        .iinr = __flush_icache_range_msr_irq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_msr_irq,
        .dflr = __invalidate_dcache_range_msr_irq_wt,
        .din = __invalidate_dcache_all_msr_irq,
        .dinr = __invalidate_dcache_range_msr_irq_wt,
};

static const struct scache wt_nomsr = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_nomsr_irq,
        .iflr = __flush_icache_range_nomsr_irq,
        .iin = __flush_icache_all_nomsr_irq,
        .iinr = __flush_icache_range_nomsr_irq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_nomsr_irq,
        .dflr = __invalidate_dcache_range_nomsr_irq,
        .din = __invalidate_dcache_all_nomsr_irq,
        .dinr = __invalidate_dcache_range_nomsr_irq,
};

/* New wt cache model for newer MicroBlaze versions */
static const struct scache wt_msr_noirq = {
        .ie = __enable_icache_msr,
        .id = __disable_icache_msr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_msr,
        .dd = __disable_dcache_msr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

static const struct scache wt_nomsr_noirq = {
        .ie = __enable_icache_nomsr,
        .id = __disable_icache_nomsr,
        .ifl = __flush_icache_all_noirq,
        .iflr = __flush_icache_range_noirq,
        .iin = __flush_icache_all_noirq,
        .iinr = __flush_icache_range_noirq,
        .de = __enable_dcache_nomsr,
        .dd = __disable_dcache_nomsr,
        .dfl = __invalidate_dcache_all_noirq_wt,
        .dflr = __invalidate_dcache_range_nomsr_wt,
        .din = __invalidate_dcache_all_noirq_wt,
        .dinr = __invalidate_dcache_range_nomsr_wt,
};

/* CPU version codes for 7.20.a and 7.20.d - see
 * arch/microblaze/kernel/cpu/cpuinfo.c
 */
#define CPUVER_7_20_A 0x0c
#define CPUVER_7_20_D 0x0f
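
/*
 * The cache model is selected along three axes: whether the msrset/msrclr
 * instructions are available, whether the dcache is write-back, and
 * whether the core is 7.20.a or newer (which allows the no-IRQ wt models).
 */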
void microblaze_cache_init(void)
{
        if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
                if (cpuinfo.dcache_wb) {
                        pr_info("wb_msr\n");
                        mbc = (struct scache *)&wb_msr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                pr_info("WB won't work properly\n");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                pr_info("wt_msr_noirq\n");
                                mbc = (struct scache *)&wt_msr_noirq;
                        } else {
                                pr_info("wt_msr\n");
                                mbc = (struct scache *)&wt_msr;
                        }
                }
        } else {
                if (cpuinfo.dcache_wb) {
                        pr_info("wb_nomsr\n");
                        mbc = (struct scache *)&wb_nomsr;
                        if (cpuinfo.ver_code <= CPUVER_7_20_D) {
                                /* MS: problem with signal handling - hw bug */
                                pr_info("WB won't work properly\n");
                        }
                } else {
                        if (cpuinfo.ver_code >= CPUVER_7_20_A) {
                                pr_info("wt_nomsr_noirq\n");
                                mbc = (struct scache *)&wt_nomsr_noirq;
                        } else {
                                pr_info("wt_nomsr\n");
                                mbc = (struct scache *)&wt_nomsr;
                        }
                }
        }

        /*
         * FIXME Invalidation is done in U-Boot.
         * WT cache: data has already been written to main memory.
         * WB cache: discarding data on noMMU made the kernel fail to boot.
         */
        /* invalidate_dcache(); */
        enable_dcache();

        invalidate_icache();
        enable_icache();
}