head_64.S

/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE startup code.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
#include <arch/spr_def.h>

/*
 * This module contains the entry code for kernel images. It performs the
 * minimal setup needed to call the generic C routines.
 */

	__HEAD
ENTRY(_start)
	/* Notify the hypervisor of what version of the API we want */
	{
	  movei r1, TILE_CHIP
	  movei r2, TILE_CHIP_REV
	}
	{
	  moveli r0, _HV_VERSION
	  jal hv_init
	}
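	/*
	 * Note on syntax: the braces bundle instructions that issue in
	 * the same VLIW cycle, so the moveli above takes effect before
	 * the hv_init call target begins executing.  hv_init takes the
	 * requested API version in r0 and the chip and revision numbers
	 * in r1/r2 (see <hv/hypervisor.h>).
	 */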
	/* Get a reasonable default ASID in r0 */
	{
	  move r0, zero
	  jal hv_inquire_asid
	}
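	/*
	 * hv_inquire_asid returns an ASID range; the low 32 bits of the
	 * returned value hold the first ASID in the range, which is all
	 * we need here (see <hv/hypervisor.h>).
	 */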

	/*
	 * Install the default page table.  The relocation required to
	 * statically define the table is a bit too complex, so we have
	 * to plug in the pointer from the L0 to the L1 table by hand.
	 * We only do this on the first cpu to boot, though, since the
	 * other CPUs should see a properly-constructed page table.
	 */
	{
	  v4int_l r2, zero, r0	/* ASID for hv_install_context */
	  moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
	}
	{
	  shl16insli r4, r4, hw0(swapper_pgprot - PAGE_OFFSET)
	}
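	/*
	 * The moveli/shl16insli pairs build symbol addresses 16 bits at
	 * a time: hw1_last() and hw0() select halfwords of the value,
	 * and shl16insli shifts the partial result left 16 and inserts
	 * the next halfword.  Physical addresses fit in two steps; full
	 * virtual addresses (hw2_last/hw1/hw0) take three.
	 */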
	{
	  ld r1, r4		/* access_pte for hv_install_context */
	}
	{
	  moveli r0, hw1_last(.Lsv_data_pmd - PAGE_OFFSET)
	  moveli r6, hw1_last(temp_data_pmd - PAGE_OFFSET)
	}
	{
	  /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
	  bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
	  inv r4
	}
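	/*
	 * bfextu with equal start/end bits extracts the single
	 * HV_PTE_GLOBAL bit: if it is already set, the boot cpu has
	 * finished its page table setup and we must not rewrite the
	 * page directory.  The inv drops the locally-cached copy of the
	 * swapper_pgprot line so it does not linger once the line is
	 * given its permanent home (see swapper_pgprot below).
	 */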
	bnez r7, .Lno_write
	{
	  shl16insli r0, r0, hw0(.Lsv_data_pmd - PAGE_OFFSET)
	  shl16insli r6, r6, hw0(temp_data_pmd - PAGE_OFFSET)
	}
	{
	  /* Cut off the low bits of the PT address. */
	  shrui r6, r6, HV_LOG2_PAGE_TABLE_ALIGN
	  /* Start with our access pte. */
	  move r5, r1
	}
	{
	  /* Stuff the address into the page table pointer slot of the PTE. */
	  bfins r5, r6, HV_PTE_INDEX_PTFN, \
			HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
	}
	{
	  /* Store the L0 data PTE. */
	  st r0, r5
	  addli r6, r6, (temp_code_pmd - temp_data_pmd) >> \
			HV_LOG2_PAGE_TABLE_ALIGN
	}
	{
	  addli r0, r0, .Lsv_code_pmd - .Lsv_data_pmd
	  bfins r5, r6, HV_PTE_INDEX_PTFN, \
			HV_PTE_INDEX_PTFN + HV_PTE_PTFN_BITS - 1
	}
	/* Store the L0 code PTE. */
	st r0, r5
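	/*
	 * At this point the two L0 entries in swapper_pg_dir point at
	 * temp_data_pmd and temp_code_pmd respectively; the pmds
	 * themselves were populated at build time by the PTE macro
	 * below.
	 */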

.Lno_write:
	moveli lr, hw2_last(1f)
	{
	  shl16insli lr, lr, hw1(1f)
	  moveli r0, hw1_last(swapper_pg_dir - PAGE_OFFSET)
	}
	{
	  shl16insli lr, lr, hw0(1f)
	  shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
	}
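	/*
	 * This is a tail call with a hand-built return address: lr was
	 * loaded with the address of the "1:" label below, so the
	 * hv_install_context(page_table, access_pte, asid, flags) call
	 * returns there.  r0 holds the physical address of
	 * swapper_pg_dir, r1 the access PTE, r2 the ASID, and r3 the
	 * flags, zero here (see <hv/hypervisor.h>).
	 */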
	{
	  move r3, zero
	  j hv_install_context
	}
1:

	/* Install the interrupt base. */
	moveli r0, hw2_last(MEM_SV_START)
	shl16insli r0, r0, hw1(MEM_SV_START)
	shl16insli r0, r0, hw0(MEM_SV_START)
	mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0

	/*
	 * Get our processor number and save it away in SAVE_K_0.
	 * Extract stuff from the topology structure: r4 = y, r6 = x,
	 * r5 = width.  FIXME: consider whether we want to just make these
	 * 64-bit values (and if so fix smp_topology write below, too).
	 */
	jal hv_inquire_topology
	{
	  v4int_l r5, zero, r1	/* r5 = width */
	  shrui r4, r0, 32	/* r4 = y */
	}
	{
	  v4int_l r6, zero, r0	/* r6 = x */
	  mul_lu_lu r4, r4, r5
	}
	{
	  add r4, r4, r6	/* r4 == cpu == y*width + x */
	}
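	/*
	 * The topology comes back with two 32-bit fields packed per
	 * register (r0 = coordinates, r1 = dimensions); v4int_l with a
	 * zero first operand zero-extends the low 32 bits of its second
	 * operand, and shrui grabs the high half.
	 */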

#ifdef CONFIG_SMP
	/*
	 * Load up our per-cpu offset.  When the first (master) tile
	 * boots, this value is still zero, so we will load boot_pc
	 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
	 * The master tile initializes the per-cpu offset array, so that
	 * when subsequent (secondary) tiles boot, they will instead load
	 * from their per-cpu versions of boot_sp and boot_pc.
	 */
	moveli r5, hw2_last(__per_cpu_offset)
	shl16insli r5, r5, hw1(__per_cpu_offset)
	shl16insli r5, r5, hw0(__per_cpu_offset)
	shl3add r5, r4, r5
	ld r5, r5
	bnez r5, 1f
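	/*
	 * shl3add computes r5 + (r4 << 3), i.e. it indexes the 8-byte
	 * entries of __per_cpu_offset[] by cpu number.  Only the master
	 * (whose offset is still zero) falls through to record the
	 * topology.
	 */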

	/*
	 * Save the width and height to the smp_topology variable
	 * for later use.
	 */
	moveli r0, hw2_last(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	shl16insli r0, r0, hw1(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	shl16insli r0, r0, hw0(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	st r0, r1
1:
#else
	move r5, zero
#endif

	/* Load and go with the correct pc and sp. */
	{
	  moveli r1, hw2_last(boot_sp)
	  moveli r0, hw2_last(boot_pc)
	}
	{
	  shl16insli r1, r1, hw1(boot_sp)
	  shl16insli r0, r0, hw1(boot_pc)
	}
	{
	  shl16insli r1, r1, hw0(boot_sp)
	  shl16insli r0, r0, hw0(boot_pc)
	}
	{
	  add r1, r1, r5
	  add r0, r0, r5
	}
	ld r0, r0
	ld sp, r1
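	/*
	 * The initial stack is THREAD_SIZE-aligned, so the low bits of
	 * sp are zero; pack the cpu number into them so a single SPR
	 * can hold both the kernel stack pointer and the cpu id.
	 */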
	or r4, sp, r4
	mtspr SPR_SYSTEM_SAVE_K_0, r4	/* save ksp0 + cpu */
	addi sp, sp, -STACK_TOP_DELTA
	{
	  move lr, zero	/* stop backtraces in the called function */
	  jr r0
	}
	ENDPROC(_start)

__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.fill PAGE_SIZE,1,0
	END(empty_zero_page)

	.macro PTE cpa, bits1
	.quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
	      HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
	      (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
	.endm
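	/*
	 * The PTE macro emits one 64-bit hypervisor PTE at assembly
	 * time: a present, accessed, dirty, global page mapping the
	 * client physical address \cpa, cached but not in L3, with the
	 * caller supplying the permission bits in \bits1.
	 */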

__PAGE_ALIGNED_DATA
	.align PAGE_SIZE
ENTRY(swapper_pg_dir)
	.org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
.Lsv_data_pmd:
	.quad 0  /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
	.org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE
.Lsv_code_pmd:
	.quad 0  /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
	.org swapper_pg_dir + HV_L0_SIZE
	END(swapper_pg_dir)
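	/*
	 * The .org directives above place the two entries at the L0
	 * indices covering PAGE_OFFSET and MEM_SV_START.  They are zero
	 * in the image; the boot cpu plugs in the PTEs sketched in the
	 * adjacent comments from the startup code above.
	 */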

	.align HV_PAGE_TABLE_ALIGN
ENTRY(temp_data_pmd)
	/*
	 * We fill the PAGE_OFFSET pmd with huge pages with
	 * VA = PA + PAGE_OFFSET.  We remap things with more precise access
	 * permissions later.
	 */
	.set addr, 0
	.rept HV_L1_ENTRIES
	PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
	.set addr, addr + HV_PAGE_SIZE_LARGE
	.endr
	.org temp_data_pmd + HV_L1_SIZE
	END(temp_data_pmd)

	.align HV_PAGE_TABLE_ALIGN
ENTRY(temp_code_pmd)
	/*
	 * We fill the MEM_SV_START pmd with huge pages with
	 * VA = PA + MEM_SV_START.  We remap things with more precise
	 * access permissions later.
	 */
	.set addr, 0
	.rept HV_L1_ENTRIES
	PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
	.set addr, addr + HV_PAGE_SIZE_LARGE
	.endr
	.org temp_code_pmd + HV_L1_SIZE
	END(temp_code_pmd)

	/*
	 * Isolate swapper_pgprot to its own cache line, since each cpu
	 * starting up will read it using VA-is-PA and local homing.
	 * This would otherwise likely conflict with other data on the cache
	 * line, once we have set its permanent home in the page tables.
	 */
	__INITDATA
	.align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
	.quad HV_PTE_PRESENT | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
	.align CHIP_L2_LINE_SIZE()
	END(swapper_pgprot)