/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
        /* Basically, most of the Spitfire vs. Cheetah madness
         * has to do with the fact that Cheetah does not support
         * IMMU flushes out of the secondary context.  Someone needs
         * to throw a south lake birthday party for the folks
         * in Microelectronics who refused to fix this shit.
         */

        /* This file is meant to be read efficiently by the CPU, not humans.
         * Try not to fuck this up for anybody...
         */
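        /* A note on the MMU demap addresses used below, per our reading
         * of the UltraSPARC demap-address format: bit 6 selects a
         * demap-context (as opposed to demap-page) operation, and bits
         * 5:4 select the context register.  So 0x50 means "demap
         * context, secondary", 0x40 "demap context, primary", 0x20
         * "demap page, nucleus", and 0x10 "demap page, secondary".
         */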
        .text
        .align  32
        .globl  __flush_tlb_mm
__flush_tlb_mm:         /* 18 insns */
        /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
        ldxa    [%o1] ASI_DMMU, %g2
        cmp     %g2, %o0
        bne,pn  %icc, __spitfire_flush_tlb_mm_slow
        mov     0x50, %g3
        stxa    %g0, [%g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g3] ASI_IMMU_DEMAP
        sethi   %hi(KERNBASE), %g3
        flush   %g3
        retl
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
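        /* Note: callers are expected to encode "flush the I-TLB too" in
         * bit 0 of the vaddr argument; it is tested with andcc and
         * cleared with andn below, and the IMMU demap is skipped when
         * the bit is clear.
         */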
        .align  32
        .globl  __flush_tlb_page
__flush_tlb_page:       /* 22 insns */
        /* %o0 = context, %o1 = vaddr */
        rdpr    %pstate, %g7
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, %pstate
        mov     SECONDARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        stxa    %o0, [%o4] ASI_DMMU
        andcc   %o1, 1, %g0
        andn    %o1, 1, %o3
        be,pn   %icc, 1f
        or      %o3, 0x10, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
1:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        retl
        wrpr    %g7, 0x0, %pstate
        nop
        nop
        nop
        nop

        .align  32
        .globl  __flush_tlb_pending
__flush_tlb_pending:    /* 26 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr    %pstate, %g7
        sllx    %o1, 3, %o1
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, %pstate
        mov     SECONDARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        stxa    %o0, [%o4] ASI_DMMU
1:      sub     %o1, (1 << 3), %o1
        ldx     [%o2 + %o1], %o3
        andcc   %o3, 1, %g0
        andn    %o3, 1, %o3
        be,pn   %icc, 2f
        or      %o3, 0x10, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        brnz,pt %o1, 1b
        nop
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        retl
        wrpr    %g7, 0x0, %pstate
        nop
        nop
        nop
        nop
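        /* The loop below counts %o3 down from (end - start - PAGE_SIZE)
         * to zero, demapping one nucleus-context page per iteration
         * from both the D-TLB and the I-TLB.
         */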
        .align  32
        .globl  __flush_tlb_kernel_range
__flush_tlb_kernel_range:       /* 16 insns */
        /* %o0=start, %o1=end */
        cmp     %o0, %o1
        be,pn   %xcc, 2f
        sethi   %hi(PAGE_SIZE), %o4
        sub     %o1, %o0, %o3
        sub     %o3, %o4, %o3
        or      %o0, 0x20, %o0          ! Nucleus
1:      stxa    %g0, [%o0 + %o3] ASI_DMMU_DEMAP
        stxa    %g0, [%o0 + %o3] ASI_IMMU_DEMAP
        membar  #Sync
        brnz,pt %o3, 1b
        sub     %o3, %o4, %o3
2:      sethi   %hi(KERNBASE), %o3
        flush   %o3
        retl
        nop
        nop

__spitfire_flush_tlb_mm_slow:
        rdpr    %pstate, %g1
        wrpr    %g1, PSTATE_IE, %pstate
        stxa    %o0, [%o1] ASI_DMMU
        stxa    %g0, [%g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g3] ASI_IMMU_DEMAP
        flush   %g6
        stxa    %g2, [%o1] ASI_DMMU
        sethi   %hi(KERNBASE), %o1
        flush   %o1
        retl
        wrpr    %g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
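/*
 * (It does so with the flush instruction, one 32-byte I-cache line at a
 * time, counting %g2 down from PAGE_SIZE to zero.)
 */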
        .section .kprobes.text, "ax"
        .align  32
        .globl  __flush_icache_page
__flush_icache_page:    /* %o0 = phys_page */
        srlx    %o0, PAGE_SHIFT, %o0
        sethi   %uhi(PAGE_OFFSET), %g1
        sllx    %o0, PAGE_SHIFT, %o0
        sethi   %hi(PAGE_SIZE), %g2
        sllx    %g1, 32, %g1
        add     %o0, %g1, %o0
1:      subcc   %g2, 32, %g2
        bne,pt  %icc, 1b
        flush   %o0 + %g2
        retl
        nop
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

        /* This routine is Spitfire specific so the hardcoded
         * D-cache size and line-size are OK.
         */
        .align  64
        .globl  __flush_dcache_page
__flush_dcache_page:    /* %o0=kaddr, %o1=flush_icache */
        sethi   %uhi(PAGE_OFFSET), %g1
        sllx    %g1, 32, %g1
        sub     %o0, %g1, %o0           ! physical address
        srlx    %o0, 11, %o0            ! make D-cache TAG
        sethi   %hi(1 << 14), %o2       ! D-cache size
        sub     %o2, (1 << 5), %o2      ! D-cache line size
1:      ldxa    [%o2] ASI_DCACHE_TAG, %o3       ! load D-cache TAG
        andcc   %o3, DTAG_MASK, %g0     ! Valid?
        be,pn   %xcc, 2f                ! Nope, branch
        andn    %o3, DTAG_MASK, %o3     ! Clear valid bits
        cmp     %o3, %o0                ! TAG match?
        bne,pt  %xcc, 2f                ! Nope, branch
        nop
        stxa    %g0, [%o2] ASI_DCACHE_TAG       ! Invalidate TAG
        membar  #Sync
2:      brnz,pt %o2, 1b
        sub     %o2, (1 << 5), %o2      ! D-cache line size

        /* The I-cache does not snoop local stores so we
         * better flush that too when necessary.
         */
        brnz,pt %o1, __flush_icache_page
        sllx    %o0, 11, %o0
        retl
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */
        .previous

/* Cheetah specific versions, patched at boot time. */
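/* These raise the trap level to 1 around the PRIMARY_CONTEXT switch;
 * our understanding is that at TL > 0 translations use the nucleus
 * context, so nothing can be accessed through the primary context
 * while it temporarily holds the foreign value being demapped.
 */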
__cheetah_flush_tlb_mm: /* 19 insns */
        rdpr    %pstate, %g7
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, 0x0, %pstate
        wrpr    %g0, 1, %tl
        mov     PRIMARY_CONTEXT, %o2
        mov     0x40, %g3
        ldxa    [%o2] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %o1
        sllx    %o1, CTX_PGSZ1_NUC_SHIFT, %o1
        or      %o0, %o1, %o0           /* Preserve nucleus page size fields */
        stxa    %o0, [%o2] ASI_DMMU
        stxa    %g0, [%g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g3] ASI_IMMU_DEMAP
        stxa    %g2, [%o2] ASI_DMMU
        sethi   %hi(KERNBASE), %o2
        flush   %o2
        wrpr    %g0, 0, %tl
        retl
        wrpr    %g7, 0x0, %pstate

__cheetah_flush_tlb_page:       /* 22 insns */
        /* %o0 = context, %o1 = vaddr */
        rdpr    %pstate, %g7
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, 0x0, %pstate
        wrpr    %g0, 1, %tl
        mov     PRIMARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %o3
        sllx    %o3, CTX_PGSZ1_NUC_SHIFT, %o3
        or      %o0, %o3, %o0           /* Preserve nucleus page size fields */
        stxa    %o0, [%o4] ASI_DMMU
        andcc   %o1, 1, %g0
        be,pn   %icc, 1f
        andn    %o1, 1, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
1:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        wrpr    %g0, 0, %tl
        retl
        wrpr    %g7, 0x0, %pstate

__cheetah_flush_tlb_pending:    /* 27 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        rdpr    %pstate, %g7
        sllx    %o1, 3, %o1
        andn    %g7, PSTATE_IE, %g2
        wrpr    %g2, 0x0, %pstate
        wrpr    %g0, 1, %tl
        mov     PRIMARY_CONTEXT, %o4
        ldxa    [%o4] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %o3
        sllx    %o3, CTX_PGSZ1_NUC_SHIFT, %o3
        or      %o0, %o3, %o0           /* Preserve nucleus page size fields */
        stxa    %o0, [%o4] ASI_DMMU
1:      sub     %o1, (1 << 3), %o1
        ldx     [%o2 + %o1], %o3
        andcc   %o3, 1, %g0
        be,pn   %icc, 2f
        andn    %o3, 1, %o3
        stxa    %g0, [%o3] ASI_IMMU_DEMAP
2:      stxa    %g0, [%o3] ASI_DMMU_DEMAP
        membar  #Sync
        brnz,pt %o1, 1b
        nop
        stxa    %g2, [%o4] ASI_DMMU
        sethi   %hi(KERNBASE), %o4
        flush   %o4
        wrpr    %g0, 0, %tl
        retl
        wrpr    %g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
        sethi   %uhi(PAGE_OFFSET), %g1
        sllx    %g1, 32, %g1
        sub     %o0, %g1, %o0
        sethi   %hi(PAGE_SIZE), %o4
1:      subcc   %o4, (1 << 5), %o4
        stxa    %g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
        membar  #Sync
        bne,pt  %icc, 1b
        nop
        retl    /* I-cache flush never needed on Cheetah, see callers. */
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */

/* Hypervisor specific versions, patched at boot time. */
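/* Calling convention used below, as we understand the sun4v ABI: for
 * fast traps the function number goes in %o5 and the arguments in
 * %o0-%o3 before "ta HV_FAST_TRAP"; a status code (zero on success)
 * comes back in %o0.  MMU unmap-address is instead its own trap number
 * (HV_MMU_UNMAP_ADDR_TRAP) taking vaddr/context/flags in %o0-%o2.
 * Nonzero statuses are routed to __hypervisor_tlb_tl0_error with the
 * attempted operation code in %o1.
 */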
__hypervisor_tlb_tl0_error:
        save    %sp, -192, %sp
        mov     %i0, %o0
        call    hypervisor_tlbop_error
        mov     %i1, %o1
        ret
        restore

__hypervisor_flush_tlb_mm: /* 10 insns */
        mov     %o0, %o2                /* ARG2: mmu context */
        mov     0, %o0                  /* ARG0: CPU lists unimplemented */
        mov     0, %o1                  /* ARG1: CPU lists unimplemented */
        mov     HV_MMU_ALL, %o3         /* ARG3: flags */
        mov     HV_FAST_MMU_DEMAP_CTX, %o5
        ta      HV_FAST_TRAP
        brnz,pn %o0, __hypervisor_tlb_tl0_error
        mov     HV_FAST_MMU_DEMAP_CTX, %o1
        retl
        nop

__hypervisor_flush_tlb_page: /* 11 insns */
        /* %o0 = context, %o1 = vaddr */
        mov     %o0, %g2
        mov     %o1, %o0                /* ARG0: vaddr + IMMU-bit */
        mov     %g2, %o1                /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx    %o0, PAGE_SHIFT, %o0
        sllx    %o0, PAGE_SHIFT, %o0
        ta      HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn %o0, __hypervisor_tlb_tl0_error
        mov     HV_MMU_UNMAP_ADDR_TRAP, %o1
        retl
        nop

__hypervisor_flush_tlb_pending: /* 16 insns */
        /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
        sllx    %o1, 3, %g1
        mov     %o2, %g2
        mov     %o0, %g3
1:      sub     %g1, (1 << 3), %g1
        ldx     [%g2 + %g1], %o0        /* ARG0: vaddr + IMMU-bit */
        mov     %g3, %o1                /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx    %o0, PAGE_SHIFT, %o0
        sllx    %o0, PAGE_SHIFT, %o0
        ta      HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn %o0, __hypervisor_tlb_tl0_error
        mov     HV_MMU_UNMAP_ADDR_TRAP, %o1
        brnz,pt %g1, 1b
        nop
        retl
        nop

__hypervisor_flush_tlb_kernel_range: /* 16 insns */
        /* %o0=start, %o1=end */
        cmp     %o0, %o1
        be,pn   %xcc, 2f
        sethi   %hi(PAGE_SIZE), %g3
        mov     %o0, %g1
        sub     %o1, %g1, %g2
        sub     %g2, %g3, %g2
1:      add     %g1, %g2, %o0           /* ARG0: virtual address */
        mov     0, %o1                  /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        ta      HV_MMU_UNMAP_ADDR_TRAP
        brnz,pn %o0, __hypervisor_tlb_tl0_error
        mov     HV_MMU_UNMAP_ADDR_TRAP, %o1
        brnz,pt %g2, 1b
        sub     %g2, %g3, %g2
2:      retl
        nop

#ifdef DCACHE_ALIASING_POSSIBLE
        /* XXX Niagara and friends have an 8K cache, so no aliasing is
         * XXX possible, but nothing explicit in the Hypervisor API
         * XXX guarantees this.
         */
__hypervisor_flush_dcache_page: /* 2 insns */
        retl
        nop
#endif
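/* tlb_patch_one: copy %o2 instruction words from %o1 to %o0, flushing
 * each patched word from the I-cache.  The counts passed in %o2 by the
 * patchers below must match the "N insns" annotations on the
 * replacement routines, otherwise the copy would be truncated.
 */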
tlb_patch_one:
1:      lduw    [%o1], %g1
        stw     %g1, [%o0]
        flush   %o0
        subcc   %o2, 1, %o2
        add     %o1, 4, %o1
        bne,pt  %icc, 1b
        add     %o0, 4, %o0
        retl
        nop

        .globl  cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
        save    %sp, -128, %sp

        sethi   %hi(__flush_tlb_mm), %o0
        or      %o0, %lo(__flush_tlb_mm), %o0
        sethi   %hi(__cheetah_flush_tlb_mm), %o1
        or      %o1, %lo(__cheetah_flush_tlb_mm), %o1
        call    tlb_patch_one
        mov     19, %o2

        sethi   %hi(__flush_tlb_page), %o0
        or      %o0, %lo(__flush_tlb_page), %o0
        sethi   %hi(__cheetah_flush_tlb_page), %o1
        or      %o1, %lo(__cheetah_flush_tlb_page), %o1
        call    tlb_patch_one
        mov     22, %o2

        sethi   %hi(__flush_tlb_pending), %o0
        or      %o0, %lo(__flush_tlb_pending), %o0
        sethi   %hi(__cheetah_flush_tlb_pending), %o1
        or      %o1, %lo(__cheetah_flush_tlb_pending), %o1
        call    tlb_patch_one
        mov     27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
        sethi   %hi(__flush_dcache_page), %o0
        or      %o0, %lo(__flush_dcache_page), %o0
        sethi   %hi(__cheetah_flush_dcache_page), %o1
        or      %o1, %lo(__cheetah_flush_dcache_page), %o1
        call    tlb_patch_one
        mov     11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

        ret
        restore

#ifdef CONFIG_SMP
        /* These are all called by the slaves of a cross call, at
         * trap level 1, with interrupts fully disabled.
         *
         * Register usage:
         *   %g5    mm->context     (all tlb flushes)
         *   %g1    address arg 1   (tlb page and range flushes)
         *   %g7    address arg 2   (tlb range flush only)
         *
         *   %g6    scratch 1
         *   %g2    scratch 2
         *   %g3    scratch 3
         *   %g4    scratch 4
         */
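        /* On sun4v, the three TLB flush handlers below are themselves
         * patched over with the __hypervisor_xcall_* versions; see
         * hypervisor_patch_cachetlbops at the end of this file.
         */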
        .align  32
        .globl  xcall_flush_tlb_mm
xcall_flush_tlb_mm:     /* 21 insns */
        mov     PRIMARY_CONTEXT, %g2
        ldxa    [%g2] ASI_DMMU, %g3
        srlx    %g3, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx    %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or      %g5, %g4, %g5           /* Preserve nucleus page size fields */
        stxa    %g5, [%g2] ASI_DMMU
        mov     0x40, %g4
        stxa    %g0, [%g4] ASI_DMMU_DEMAP
        stxa    %g0, [%g4] ASI_IMMU_DEMAP
        stxa    %g3, [%g2] ASI_DMMU
        retry
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop

        .globl  xcall_flush_tlb_page
xcall_flush_tlb_page:   /* 17 insns */
        /* %g5=context, %g1=vaddr */
        mov     PRIMARY_CONTEXT, %g4
        ldxa    [%g4] ASI_DMMU, %g2
        srlx    %g2, CTX_PGSZ1_NUC_SHIFT, %g4
        sllx    %g4, CTX_PGSZ1_NUC_SHIFT, %g4
        or      %g5, %g4, %g5
        mov     PRIMARY_CONTEXT, %g4
        stxa    %g5, [%g4] ASI_DMMU
        andcc   %g1, 0x1, %g0
        be,pn   %icc, 2f
        andn    %g1, 0x1, %g5
        stxa    %g0, [%g5] ASI_IMMU_DEMAP
2:      stxa    %g0, [%g5] ASI_DMMU_DEMAP
        membar  #Sync
        stxa    %g2, [%g4] ASI_DMMU
        retry
        nop
        nop

        .globl  xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:   /* 25 insns */
        sethi   %hi(PAGE_SIZE - 1), %g2
        or      %g2, %lo(PAGE_SIZE - 1), %g2
        andn    %g1, %g2, %g1
        andn    %g7, %g2, %g7
        sub     %g7, %g1, %g3
        add     %g2, 1, %g2
        sub     %g3, %g2, %g3
        or      %g1, 0x20, %g1          ! Nucleus
1:      stxa    %g0, [%g1 + %g3] ASI_DMMU_DEMAP
        stxa    %g0, [%g1 + %g3] ASI_IMMU_DEMAP
        membar  #Sync
        brnz,pt %g3, 1b
        sub     %g3, %g2, %g3
        retry
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        nop
        /* This runs in a very controlled environment, so we do
         * not need to worry about BH races etc.
         */
        .globl  xcall_sync_tick
xcall_sync_tick:
661:    rdpr    %pstate, %g2
        wrpr    %g2, PSTATE_IG | PSTATE_AG, %pstate
        .section .sun4v_2insn_patch, "ax"
        .word   661b
        nop
        nop
        .previous

        rdpr    %pil, %g2
        wrpr    %g0, PIL_NORMAL_MAX, %pil
        sethi   %hi(109f), %g7
        b,pt    %xcc, etrap_irq
109:    or      %g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
        call    trace_hardirqs_off
        nop
#endif
        call    smp_synchronize_tick_client
        nop
        b       rtrap_xcall
        ldx     [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

        .globl  xcall_fetch_glob_regs
xcall_fetch_glob_regs:
        sethi   %hi(global_reg_snapshot), %g1
        or      %g1, %lo(global_reg_snapshot), %g1
        __GET_CPUID(%g2)
        sllx    %g2, 6, %g3
        add     %g1, %g3, %g1
        rdpr    %tstate, %g7
        stx     %g7, [%g1 + GR_SNAP_TSTATE]
        rdpr    %tpc, %g7
        stx     %g7, [%g1 + GR_SNAP_TPC]
        rdpr    %tnpc, %g7
        stx     %g7, [%g1 + GR_SNAP_TNPC]
        stx     %o7, [%g1 + GR_SNAP_O7]
        stx     %i7, [%g1 + GR_SNAP_I7]
        /* Don't try this at home kids... */
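        /* (Rotate the window pointer back one window so that %i7 names
         * the interrupted code's caller return address, grab it for
         * GR_SNAP_RPC below, then restore %cwp.)
         */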
        rdpr    %cwp, %g3
        sub     %g3, 1, %g7
        wrpr    %g7, %cwp
        mov     %i7, %g7
        wrpr    %g3, %cwp
        stx     %g7, [%g1 + GR_SNAP_RPC]
        sethi   %hi(trap_block), %g7
        or      %g7, %lo(trap_block), %g7
        sllx    %g2, TRAP_BLOCK_SZ_SHIFT, %g2
        add     %g7, %g2, %g7
        ldx     [%g7 + TRAP_PER_CPU_THREAD], %g3
        stx     %g3, [%g1 + GR_SNAP_THREAD]
        retry

#ifdef DCACHE_ALIASING_POSSIBLE
        .align  32
        .globl  xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
        sethi   %hi(PAGE_SIZE), %g3
1:      subcc   %g3, (1 << 5), %g3
        stxa    %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
        membar  #Sync
        bne,pt  %icc, 1b
        nop
        retry
        nop
#endif /* DCACHE_ALIASING_POSSIBLE */
        .globl  xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
                                     %g7 == kernel page virtual address
                                     %g5 == (page->mapping != NULL) */
#ifdef DCACHE_ALIASING_POSSIBLE
        srlx    %g1, (13 - 2), %g1      ! Form tag comparator
        sethi   %hi(L1DCACHE_SIZE), %g3 ! D$ size == 16K
        sub     %g3, (1 << 5), %g3      ! D$ linesize == 32
1:      ldxa    [%g3] ASI_DCACHE_TAG, %g2
        andcc   %g2, 0x3, %g0
        be,pn   %xcc, 2f
        andn    %g2, 0x3, %g2
        cmp     %g2, %g1
        bne,pt  %xcc, 2f
        nop
        stxa    %g0, [%g3] ASI_DCACHE_TAG
        membar  #Sync
2:      cmp     %g3, 0
        bne,pt  %xcc, 1b
        sub     %g3, (1 << 5), %g3
        brz,pn  %g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
        sethi   %hi(PAGE_SIZE), %g3
1:      flush   %g7
        subcc   %g3, (1 << 5), %g3
        bne,pt  %icc, 1b
        add     %g7, (1 << 5), %g7
2:      retry
        nop
        nop
        /* %g5: error
         * %g6: tlb op
         */
__hypervisor_tlb_xcall_error:
        mov     %g5, %g4
        mov     %g6, %g5
        ba,pt   %xcc, etrap
        rd      %pc, %g7
        mov     %l4, %o0
        call    hypervisor_tlbop_error_xcall
        mov     %l5, %o1
        ba,a,pt %xcc, rtrap
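        /* The cross-call variants below run at trap level 1 where the
         * %o registers still belong to the interrupted context, so they
         * are stashed in global scratch registers around the hypervisor
         * call and restored before the retry.
         */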
        .globl  __hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
        /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
        mov     %o0, %g2
        mov     %o1, %g3
        mov     %o2, %g4
        mov     %o3, %g1
        mov     %o5, %g7
        clr     %o0                     /* ARG0: CPU lists unimplemented */
        clr     %o1                     /* ARG1: CPU lists unimplemented */
        mov     %g5, %o2                /* ARG2: mmu context */
        mov     HV_MMU_ALL, %o3         /* ARG3: flags */
        mov     HV_FAST_MMU_DEMAP_CTX, %o5
        ta      HV_FAST_TRAP
        mov     HV_FAST_MMU_DEMAP_CTX, %g6
        brnz,pn %o0, __hypervisor_tlb_xcall_error
        mov     %o0, %g5
        mov     %g2, %o0
        mov     %g3, %o1
        mov     %g4, %o2
        mov     %g1, %o3
        mov     %g7, %o5
        membar  #Sync
        retry

        .globl  __hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
        /* %g5=ctx, %g1=vaddr */
        mov     %o0, %g2
        mov     %o1, %g3
        mov     %o2, %g4
        mov     %g1, %o0                /* ARG0: virtual address */
        mov     %g5, %o1                /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        srlx    %o0, PAGE_SHIFT, %o0
        sllx    %o0, PAGE_SHIFT, %o0
        ta      HV_MMU_UNMAP_ADDR_TRAP
        mov     HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,a,pn %o0, __hypervisor_tlb_xcall_error
        mov     %o0, %g5
        mov     %g2, %o0
        mov     %g3, %o1
        mov     %g4, %o2
        membar  #Sync
        retry

        .globl  __hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
        /* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
        sethi   %hi(PAGE_SIZE - 1), %g2
        or      %g2, %lo(PAGE_SIZE - 1), %g2
        andn    %g1, %g2, %g1
        andn    %g7, %g2, %g7
        sub     %g7, %g1, %g3
        add     %g2, 1, %g2
        sub     %g3, %g2, %g3
        mov     %o0, %g2
        mov     %o1, %g4
        mov     %o2, %g7
1:      add     %g1, %g3, %o0           /* ARG0: virtual address */
        mov     0, %o1                  /* ARG1: mmu context */
        mov     HV_MMU_ALL, %o2         /* ARG2: flags */
        ta      HV_MMU_UNMAP_ADDR_TRAP
        mov     HV_MMU_UNMAP_ADDR_TRAP, %g6
        brnz,pn %o0, __hypervisor_tlb_xcall_error
        mov     %o0, %g5
        sethi   %hi(PAGE_SIZE), %o2
        brnz,pt %g3, 1b
        sub     %g3, %o2, %g3
        mov     %g2, %o0
        mov     %g4, %o1
        mov     %g7, %o2
        membar  #Sync
        retry

        /* These just get rescheduled to PIL vectors. */
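        /* Each handler just raises the corresponding softint bit and
         * returns from the trap; the actual work is done later by the
         * handler for that PIL.
         */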
        .globl  xcall_call_function
xcall_call_function:
        wr      %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
        retry

        .globl  xcall_call_function_single
xcall_call_function_single:
        wr      %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
        retry

        .globl  xcall_receive_signal
xcall_receive_signal:
        wr      %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
        retry

        .globl  xcall_capture
xcall_capture:
        wr      %g0, (1 << PIL_SMP_CAPTURE), %set_softint
        retry

        .globl  xcall_new_mmu_context_version
xcall_new_mmu_context_version:
        wr      %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
        retry

#ifdef CONFIG_KGDB
        .globl  xcall_kgdb_capture
xcall_kgdb_capture:
        wr      %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
        retry
#endif

#endif /* CONFIG_SMP */
        .globl  hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
        save    %sp, -128, %sp

        sethi   %hi(__flush_tlb_mm), %o0
        or      %o0, %lo(__flush_tlb_mm), %o0
        sethi   %hi(__hypervisor_flush_tlb_mm), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_mm), %o1
        call    tlb_patch_one
        mov     10, %o2

        sethi   %hi(__flush_tlb_page), %o0
        or      %o0, %lo(__flush_tlb_page), %o0
        sethi   %hi(__hypervisor_flush_tlb_page), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_page), %o1
        call    tlb_patch_one
        mov     11, %o2

        sethi   %hi(__flush_tlb_pending), %o0
        or      %o0, %lo(__flush_tlb_pending), %o0
        sethi   %hi(__hypervisor_flush_tlb_pending), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_pending), %o1
        call    tlb_patch_one
        mov     16, %o2

        sethi   %hi(__flush_tlb_kernel_range), %o0
        or      %o0, %lo(__flush_tlb_kernel_range), %o0
        sethi   %hi(__hypervisor_flush_tlb_kernel_range), %o1
        or      %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
        call    tlb_patch_one
        mov     16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
        sethi   %hi(__flush_dcache_page), %o0
        or      %o0, %lo(__flush_dcache_page), %o0
        sethi   %hi(__hypervisor_flush_dcache_page), %o1
        or      %o1, %lo(__hypervisor_flush_dcache_page), %o1
        call    tlb_patch_one
        mov     2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
        sethi   %hi(xcall_flush_tlb_mm), %o0
        or      %o0, %lo(xcall_flush_tlb_mm), %o0
        sethi   %hi(__hypervisor_xcall_flush_tlb_mm), %o1
        or      %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
        call    tlb_patch_one
        mov     21, %o2

        sethi   %hi(xcall_flush_tlb_page), %o0
        or      %o0, %lo(xcall_flush_tlb_page), %o0
        sethi   %hi(__hypervisor_xcall_flush_tlb_page), %o1
        or      %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
        call    tlb_patch_one
        mov     17, %o2

        sethi   %hi(xcall_flush_tlb_kernel_range), %o0
        or      %o0, %lo(xcall_flush_tlb_kernel_range), %o0
        sethi   %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
        or      %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
        call    tlb_patch_one
        mov     25, %o2
#endif /* CONFIG_SMP */

        ret
        restore