cplbinit.c
  1. /*
  2. * Blackfin CPLB initialization
  3. *
  4. * Copyright 2007-2009 Analog Devices Inc.
  5. *
  6. * Licensed under the GPL-2 or later.
  7. */
  8. #include <linux/module.h>
  9. #include <asm/blackfin.h>
  10. #include <asm/cacheflush.h>
  11. #include <asm/cplb.h>
  12. #include <asm/cplbinit.h>
  13. #include <asm/mem_map.h>
/*
 * Per-core static CPLB entry tables (instruction and data), filled in by
 * generate_cplb_tables_cpu() below.
 */
struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;

/*
 * Index of the first table slot past the boot-time ("locked") entries;
 * set at the end of generate_cplb_tables_cpu().  Slots from here up are
 * presumably recycled at runtime by the CPLB miss handling code — that
 * consumer is defined elsewhere, not visible in this file.
 */
int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;

/*
 * Address-ordered memory-region descriptors built by
 * generate_cplb_tables_all(): each entry gives a region end address and
 * the CPLB data flags to use for that region.  Nine slots is the worst
 * case emitted by that function (guarded there by BUG_ON).
 */
struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;

/* Number of valid entries in icplb_bounds[] / dcplb_bounds[]. */
int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;
/*
 * generate_cplb_tables_cpu - populate the static CPLB tables for one core
 * @cpu: core index into icplb_tbl[]/dcplb_tbl[]
 *
 * Lays down the boot-time CPLB entries covering kernel SDRAM, an optional
 * XIP flash window, and the core's L1 code/data regions, then records in
 * first_switched_{i,d}cplb where the runtime-replaceable slots begin and
 * zeroes every remaining slot's data word (a zero data word presumably
 * marks the slot invalid — confirm against the CPLB miss handler).
 */
void __init generate_cplb_tables_cpu(unsigned int cpu)
{
	int i_d, i_i;
	unsigned long addr;
	unsigned long cplb_pageflags, cplb_pagesize;

	/* Tables for this core only. */
	struct cplb_entry *d_tbl = dcplb_tbl[cpu];
	struct cplb_entry *i_tbl = icplb_tbl[cpu];

	printk(KERN_INFO "NOMPU: setting up cplb tables\n");

	i_d = i_i = 0;

#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
	/*
	 * Set up the zero page: a 1kB entry at address 0 with SDRAM_OOPS
	 * attributes so that NULL-pointer dereferences can be trapped.
	 */
	d_tbl[i_d].addr = 0;
	d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
	i_tbl[i_i].addr = 0;
	i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
#endif

	/*
	 * Cover kernel memory [0, memory_start) with the largest page size
	 * available: 16M pages when the part supports them, else 4M.
	 */
	addr = 0;
#ifdef PAGE_SIZE_16MB
	cplb_pageflags = PAGE_SIZE_16MB;
	cplb_pagesize = SIZE_16M;
#else
	cplb_pageflags = PAGE_SIZE_4MB;
	cplb_pagesize = SIZE_4M;
#endif

	for (; addr < memory_start; addr += cplb_pagesize) {
		d_tbl[i_d].addr = addr;
		d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags;
		i_tbl[i_i].addr = addr;
		i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags;
	}

#ifdef CONFIG_ROMKERNEL
	/*
	 * Cover the kernel XIP flash area with a single page aligned down
	 * to the page size (16M on BF60x, 4M otherwise).
	 */
#ifdef CONFIG_BF60x
	addr = CONFIG_ROM_BASE & ~(16 * 1024 * 1024 - 1);
	d_tbl[i_d].addr = addr;
	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_16MB;
	i_tbl[i_i].addr = addr;
	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_16MB;
#else
	addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
	d_tbl[i_d].addr = addr;
	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
	i_tbl[i_i].addr = addr;
	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
#endif
#endif

	/* Cover L1 memory.  One 4M area for code and data each is enough. */
	if (cpu == 0) {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#ifdef CONFIG_SMP
	else {
		/* Secondary core: use core B's L1 base addresses instead. */
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = COREB_L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = COREB_L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#endif

	/* Everything from here up is a fixed boot-time entry. */
	first_switched_dcplb = i_d;
	first_switched_icplb = i_i;

	BUG_ON(first_switched_dcplb > MAX_CPLBS);
	BUG_ON(first_switched_icplb > MAX_CPLBS);

	/* Clear the data word of all remaining (replaceable) slots. */
	while (i_d < MAX_CPLBS)
		d_tbl[i_d++].data = 0;
	while (i_i < MAX_CPLBS)
		i_tbl[i_i++].data = 0;
}
/*
 * generate_cplb_tables_all - build the system-wide CPLB boundary tables
 *
 * Fills dcplb_bounds[]/icplb_bounds[] with a strictly ascending sequence
 * of region end addresses plus the CPLB data flags for each region (the
 * flow of the entries below — each one "up to" the next landmark — shows
 * the ascending-order invariant).  A .data of 0 marks an addressing hole.
 * Sets {d,i}cplb_nr_bounds to the number of entries actually emitted and
 * BUG()s if either table would overflow its 9-slot array.
 */
void __init generate_cplb_tables_all(void)
{
	unsigned long uncached_end;
	int i_d, i_i;

	i_d = 0;
	/*
	 * Normal RAM, including MTD FS.  With MTD_UCLINUX the filesystem
	 * image sits past memory_end, so extend the cached region over it.
	 */
#ifdef CONFIG_MTD_UCLINUX
	uncached_end = memory_mtd_start + mtd_size;
#else
	uncached_end = memory_end;
#endif
	/*
	 * If DMA uncached is less than 1MB, mark the 1MB chunk as uncached
	 * so that we don't have to use 4kB pages and cause CPLB thrashing.
	 * (Rounding uncached_end down to a 1MB boundary shrinks the cached
	 * region, handing the remainder to the uncached entry below.)
	 */
	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
		dcplb_bounds[i_d].eaddr = uncached_end;
	else
		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;

	/* DMA uncached region: from the cached region's end up to _ramend. */
	if (DMA_UNCACHED_REGION) {
		dcplb_bounds[i_d].eaddr = _ramend;
		dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
	}
	if (_ramend != physical_mem_end) {
		/* Reserved memory: cacheability is a config choice. */
		dcplb_bounds[i_d].eaddr = physical_mem_end;
		dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
					    SDRAM_DGENERIC : SDRAM_DNON_CHBL);
	}
	/* Addressing hole up to the async bank (data == 0 -> no access). */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
	dcplb_bounds[i_d++].data = 0;
	/* ASYNC banks 0-3, treated as one contiguous EBIU region. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	dcplb_bounds[i_d++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
	dcplb_bounds[i_d++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START;
		dcplb_bounds[i_d++].data = 0;
		/* L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
		dcplb_bounds[i_d++].data = L2_DMEMORY;
	}
	dcplb_nr_bounds = i_d;
	BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));

	/* Now the instruction-side table, same region layout. */
	i_i = 0;
	/* Normal RAM, including MTD FS. */
	icplb_bounds[i_i].eaddr = uncached_end;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	if (_ramend != physical_mem_end) {
		/* DMA uncached region: no instruction fetch from it. */
		if (DMA_UNCACHED_REGION) {
			/* Normally this hole is caught by the async below. */
			icplb_bounds[i_i].eaddr = _ramend;
			icplb_bounds[i_i++].data = 0;
		}
		/* Reserved memory. */
		icplb_bounds[i_i].eaddr = physical_mem_end;
		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
					    SDRAM_IGENERIC : SDRAM_INON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
	icplb_bounds[i_i++].data = 0;
	/* ASYNC banks. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	icplb_bounds[i_i++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
	icplb_bounds[i_i++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START;
		icplb_bounds[i_i++].data = 0;
		/* L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
		icplb_bounds[i_i++].data = L2_IMEMORY;
	}
	icplb_nr_bounds = i_i;
	BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}