/* tsb.S: Sparc64 TSB table handling.
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/page.h>
#include <asm/cpudata.h>
#include <asm/mmu.h>

	.text
	.align	32
	/* Invoked from the TLB miss handler, we are in the
	 * MMU global registers and they are set up like
	 * this:
	 *
	 * %g1: TSB entry pointer
	 * %g2: available temporary
	 * %g3: FAULT_CODE_{D,I}TLB
	 * %g4: available temporary
	 * %g5: available temporary
	 * %g6: TAG TARGET
	 * %g7: available temporary, will be loaded by us with
	 *      the physical address base of the linux page
	 *      tables for the current address space
	 */
tsb_miss_dtlb:
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g4
	srlx	%g4, PAGE_SHIFT, %g4
	ba,pt	%xcc, tsb_miss_page_table_walk
	sllx	%g4, PAGE_SHIFT, %g4

tsb_miss_itlb:
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_IMMU, %g4
	srlx	%g4, PAGE_SHIFT, %g4
	ba,pt	%xcc, tsb_miss_page_table_walk
	sllx	%g4, PAGE_SHIFT, %g4
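
	/* The TLB Tag Access register holds the faulting virtual
	 * address in its upper bits and the context ID in the low
	 * PAGE_SHIFT bits; the srlx/sllx pairs above clear the
	 * context field, leaving the page-aligned missing vaddr
	 * in %g4.
	 */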

	/* At this point we have:
	 * %g1 -- PAGE_SIZE TSB entry address
	 * %g3 -- FAULT_CODE_{D,I}TLB
	 * %g4 -- missing virtual address
	 * %g6 -- TAG TARGET (vaddr >> 22)
	 */
tsb_miss_page_table_walk:
	TRAP_LOAD_TRAP_BLOCK(%g7, %g5)

	/* Before committing to a full page table walk,
	 * check the huge page TSB.
	 */
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
661:	ldx	[%g7 + TRAP_PER_CPU_TSB_HUGE], %g5
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov	SCRATCHPAD_UTSBREG2, %g5
	ldxa	[%g5] ASI_SCRATCHPAD, %g5
	.previous
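
	/* The 661:/.sun4v_2insn_patch pairing above is the boot-time
	 * patching idiom used throughout this file: when running on
	 * sun4v, the kernel overwrites the two instructions at 661b
	 * with the two instructions carried in the patch section, so
	 * only one variant ever executes.
	 */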

	cmp	%g5, -1
	be,pt	%xcc, 80f
	nop

	/* We need an aligned pair of registers containing 2 values
	 * which can be easily rematerialized.  %g6 and %g7 foot the
	 * bill just nicely.  We'll save %g6 away into %g2 for the
	 * huge page TSB TAG comparison.
	 *
	 * Perform a huge page TSB lookup.
	 */
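
	/* The lookup below, as a C sketch (names are illustrative
	 * only):
	 *
	 *	nentries = 512UL << (tsb_cfg & 0x7UL);
	 *	base     = tsb_cfg & ~0x7UL;
	 *	entry    = base + ((vaddr >> REAL_HPAGE_SHIFT) &
	 *			   (nentries - 1UL)) * 16;
	 */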
	mov	%g6, %g2
	and	%g5, 0x7, %g6
	mov	512, %g7
	andn	%g5, 0x7, %g5
	sllx	%g7, %g6, %g7
	srlx	%g4, REAL_HPAGE_SHIFT, %g6
	sub	%g7, 1, %g7
	and	%g6, %g7, %g6
	sllx	%g6, 4, %g6
	add	%g5, %g6, %g5

	TSB_LOAD_QUAD(%g5, %g6)
	cmp	%g6, %g2
	be,a,pt	%xcc, tsb_tlb_reload
	mov	%g7, %g5

	/* No match, remember the huge page TSB entry address,
	 * and restore %g6 and %g7.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g6)
	srlx	%g4, 22, %g6
80:	stx	%g5, [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP]
#endif
	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7

	/* At this point we have:
	 * %g1 -- TSB entry address
	 * %g3 -- FAULT_CODE_{D,I}TLB
	 * %g4 -- missing virtual address
	 * %g6 -- TAG TARGET (vaddr >> 22)
	 * %g7 -- page table physical address
	 *
	 * We know that both the base PAGE_SIZE TSB and the HPAGE_SIZE
	 * TSB lack a matching entry.
	 */
tsb_miss_page_table_walk_sun4v_fastpath:
	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

	/* Valid PTE is now in %g5. */

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
661:	sethi	%uhi(_PAGE_SZALL_4U), %g7
	sllx	%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov	_PAGE_SZALL_4V, %g7
	nop
	.previous

	and	%g5, %g7, %g2

661:	sethi	%uhi(_PAGE_SZHUGE_4U), %g7
	sllx	%g7, 32, %g7
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	mov	_PAGE_SZHUGE_4V, %g7
	nop
	.previous

	cmp	%g2, %g7
	bne,pt	%xcc, 60f
	nop
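
	/* The first patched stanza above masked the PTE down to its
	 * page size field (_PAGE_SZALL_*), and the second materialized
	 * the huge page size encoding; the compare therefore falls
	 * through only for huge mappings.
	 */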

	/* It is a huge page, use the huge page TSB entry address we
	 * calculated above.  If the huge page TSB has not been
	 * allocated, set up a trap stack and call hugetlb_setup()
	 * to do so, then return from the trap to replay the TLB
	 * miss.
	 *
	 * This is necessary to handle the case of transparent huge
	 * pages where we don't really have a non-atomic context
	 * in which to allocate the hugepage TSB hash table.  When
	 * the 'mm' faults in the hugepage for the first time, we
	 * thus handle it here.  This also makes sure that we can
	 * allocate the TSB hash table on the correct NUMA node.
	 */
	TRAP_LOAD_TRAP_BLOCK(%g7, %g2)
	ldx	[%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1
	cmp	%g1, -1
	bne,pt	%xcc, 60f
	nop

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tl, %g7
	cmp	%g7, 1
	bne,pn	%xcc, winfix_trampoline
	mov	%g3, %g4
	ba,pt	%xcc, etrap
	rd	%pc, %g7
	call	hugetlb_setup
	add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	nop
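
	/* A sketch of the convention assumed here: etrap expects its
	 * caller's return PC in %g7 (hence "rd %pc, %g7" in the branch
	 * delay slot), and rtrap returns from the trap without loading
	 * the TLB, so the faulting access simply replays the miss with
	 * the huge page TSB now allocated.
	 */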
60:
#endif

	/* At this point we have:
	 * %g1 -- TSB entry address
	 * %g3 -- FAULT_CODE_{D,I}TLB
	 * %g5 -- valid PTE
	 * %g6 -- TAG TARGET (vaddr >> 22)
	 */
tsb_reload:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
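
	/* Presumably, per the TSB_LOCK_TAG/TSB_WRITE macros in
	 * asm/tsb.h: the tag's lock bit is first set atomically, and
	 * the new PTE is stored before the final tag so that a
	 * concurrent probe never sees a matching tag paired with a
	 * stale PTE.
	 */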

	/* Finally, load TLB and return from trap. */
tsb_tlb_reload:
	cmp	%g3, FAULT_CODE_DTLB
	bne,pn	%xcc, tsb_itlb_load
	nop

tsb_dtlb_load:
661:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_dtlb_load
	mov	%g5, %g3

tsb_itlb_load:
	/* Executable bit must be set. */
661:	sethi	%hi(_PAGE_EXEC_4U), %g4
	andcc	%g5, %g4, %g0
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	andcc	%g5, _PAGE_EXEC_4V, %g0
	nop
	.previous

	be,pn	%xcc, tsb_do_fault
	nop

661:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_itlb_load
	mov	%g5, %g3

	/* No valid entry in the page tables, do full fault
	 * processing.
	 */
	.globl	tsb_do_fault
tsb_do_fault:
	cmp	%g3, FAULT_CODE_DTLB

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g4
	.previous

	bne,pn	%xcc, tsb_do_itlb_fault
	nop

tsb_do_dtlb_fault:
	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	ldx	[%g4 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	/* Clear context ID bits. */
	srlx	%g5, PAGE_SHIFT, %g5
	sllx	%g5, PAGE_SHIFT, %g5

	be,pt	%xcc, sparc64_realfault_common
	mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	nop

tsb_do_itlb_fault:
	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	mov	FAULT_CODE_ITLB, %g4

	.globl	sparc64_realfault_common
sparc64_realfault_common:
	/* fault code in %g4, fault address in %g5, etrap will
	 * preserve these two values in %l4 and %l5 respectively
	 */
	ba,pt	%xcc, etrap			! Save trap state
1:	rd	%pc, %g7			! ...
	stb	%l4, [%g6 + TI_FAULT_CODE]	! Save fault code
	stx	%l5, [%g6 + TI_FAULT_ADDR]	! Save fault address
	call	do_sparc64_fault		! Call fault handler
	add	%sp, PTREGS_OFF, %o0		! Compute pt_regs arg
	ba,pt	%xcc, rtrap			! Restore cpu state
	nop					! Delay slot (fill me)

winfix_trampoline:
	rdpr	%tpc, %g3			! Prepare winfixup TNPC
	or	%g3, 0x7c, %g3			! Compute branch offset
	wrpr	%g3, %tnpc			! Write it into TNPC
	done					! Trap return
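
	/* The 0x7c offset presumably exploits the trap vector layout:
	 * each vector is 32 instructions (0x80 bytes), and its final
	 * slot at offset 0x7c holds a branch to the window fixup code,
	 * so "done" resumes execution right at that branch.
	 */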

	/* Insert an entry into the TSB.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 * %o2: pte
	 */
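
	/* Note on the %pstate dance below: the two-operand wrpr form
	 * XORs its arguments, so the first wrpr clears PSTATE_IE
	 * (disabling interrupts) and the final "wrpr %o5, %pstate"
	 * restores the saved value.
	 */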
	.align	32
	.globl	__tsb_insert
__tsb_insert:
	rdpr	%pstate, %o5
	wrpr	%o5, PSTATE_IE, %pstate
	TSB_LOCK_TAG(%o0, %g2, %g3)
	TSB_WRITE(%o0, %o2, %o1)
	wrpr	%o5, %pstate
	retl
	nop
	.size	__tsb_insert, .-__tsb_insert

	/* Flush the given TSB entry if it has the matching
	 * tag.
	 *
	 * %o0: TSB entry pointer (virt or phys address)
	 * %o1: tag
	 */
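
	/* The algorithm, roughly: spin while the tag's lock bit is
	 * set, then, if the tag still matches, compare-and-swap the
	 * invalid bit into the tag; if the CAS loses a race, retry
	 * from the top.
	 */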
	.align	32
	.globl	tsb_flush
	.type	tsb_flush,#function
tsb_flush:
	sethi	%hi(TSB_TAG_LOCK_HIGH), %g2
1:	TSB_LOAD_TAG(%o0, %g1)
	srlx	%g1, 32, %o3
	andcc	%o3, %g2, %g0
	bne,pn	%icc, 1b
	nop
	cmp	%g1, %o1
	mov	1, %o3
	bne,pt	%xcc, 2f
	sllx	%o3, TSB_TAG_INVALID_BIT, %o3
	TSB_CAS_TAG(%o0, %g1, %o3)
	cmp	%g1, %o3
	bne,pn	%xcc, 1b
	nop
2:	retl
	nop
	.size	tsb_flush, .-tsb_flush

	/* Reload MMU related context switch state at
	 * schedule() time.
	 *
	 * %o0: page table physical address
	 * %o1: TSB base config pointer
	 * %o2: TSB huge config pointer, or NULL if none
	 * %o3: Hypervisor TSB descriptor physical address
	 * %o4: Secondary context to load, if non-zero
	 *
	 * We have to run this whole thing with interrupts
	 * disabled so that the current cpu doesn't change
	 * due to preemption.
	 */
	.align	32
	.globl	__tsb_context_switch
	.type	__tsb_context_switch,#function
__tsb_context_switch:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate

	brz,pn	%o4, 1f
	mov	SECONDARY_CONTEXT, %o5

661:	stxa	%o4, [%o5] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word		661b
	stxa	%o4, [%o5] ASI_MMU
	.previous
	flush	%g6

1:
	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)

	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]

	ldx	[%o1 + TSB_CONFIG_REG_VAL], %o0
	brz,pt	%o2, 1f
	mov	-1, %g3
	ldx	[%o2 + TSB_CONFIG_REG_VAL], %g3
1:	stx	%g3, [%g2 + TRAP_PER_CPU_TSB_HUGE]

	sethi	%hi(tlb_type), %g2
	lduw	[%g2 + %lo(tlb_type)], %g2
	cmp	%g2, 3
	bne,pt	%icc, 50f
	nop

	/* Hypervisor TSB switch. */
	mov	SCRATCHPAD_UTSBREG1, %o5
	stxa	%o0, [%o5] ASI_SCRATCHPAD
	mov	SCRATCHPAD_UTSBREG2, %o5
	stxa	%g3, [%o5] ASI_SCRATCHPAD
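
	/* sun4v fast-trap calling convention, as relied on below: the
	 * hypervisor API function number goes in %o5 and arguments in
	 * %o0/%o1 -- here the number of TSB descriptors (1, or 2 when
	 * a huge page TSB is configured) and their physical address.
	 */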
	mov	2, %o0
	cmp	%g3, -1
	move	%xcc, 1, %o0

	mov	HV_FAST_MMU_TSB_CTXNON0, %o5
	mov	%o3, %o1
	ta	HV_FAST_TRAP

	/* Finish up. */
	ba,pt	%xcc, 9f
	nop

	/* SUN4U TSB switch. */
50:	mov	TSB_REG, %o5
	stxa	%o0, [%o5] ASI_DMMU
	membar	#Sync
	stxa	%o0, [%o5] ASI_IMMU
	membar	#Sync

2:	ldx	[%o1 + TSB_CONFIG_MAP_VADDR], %o4
	brz	%o4, 9f
	ldx	[%o1 + TSB_CONFIG_MAP_PTE], %o5

	sethi	%hi(sparc64_highest_unlocked_tlb_ent), %g2
	mov	TLB_TAG_ACCESS, %g3
	lduw	[%g2 + %lo(sparc64_highest_unlocked_tlb_ent)], %g2
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sllx	%g2, 3, %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

	brz,pt	%o2, 9f
	nop

	ldx	[%o2 + TSB_CONFIG_MAP_VADDR], %o4
	ldx	[%o2 + TSB_CONFIG_MAP_PTE], %o5
	mov	TLB_TAG_ACCESS, %g3
	stxa	%o4, [%g3] ASI_DMMU
	membar	#Sync
	sub	%g2, (1 << 3), %g2
	stxa	%o5, [%g2] ASI_DTLB_DATA_ACCESS
	membar	#Sync

9:
	wrpr	%g1, %pstate
	retl
	nop
	.size	__tsb_context_switch, .-__tsb_context_switch

#define TSB_PASS_BITS	((1 << TSB_TAG_LOCK_BIT) | \
			 (1 << TSB_TAG_INVALID_BIT))

	.align	32
	.globl	copy_tsb
	.type	copy_tsb,#function
copy_tsb:	/* %o0=old_tsb_base, %o1=old_tsb_size
		 * %o2=new_tsb_base, %o3=new_tsb_size
		 * %o4=page_size_shift
		 */
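
	/* The index rehash below, as a C sketch (names are
	 * illustrative only):
	 *
	 *	va  = (tag << 22) | ((old_idx & 511) << page_size_shift);
	 *	new = (va >> page_size_shift) & (new_nents - 1);
	 *
	 * i.e. the VA bits consumed by the old index are rebuilt from
	 * the entry's position before indexing into the new table.
	 */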
	sethi		%uhi(TSB_PASS_BITS), %g7
	srlx		%o3, 4, %o3
	add		%o0, %o1, %o1	/* end of old tsb */
	sllx		%g7, 32, %g7
	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */
	mov		%o4, %g1	/* page_size_shift */

661:	prefetcha	[%o0] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o0] ASI_PHYS_USE_EC, #one_read
	.previous

90:	andcc		%o0, (64 - 1), %g0
	bne		1f
	add		%o0, 64, %o5

661:	prefetcha	[%o5] ASI_N, #one_read
	.section	.tsb_phys_patch, "ax"
	.word		661b
	prefetcha	[%o5] ASI_PHYS_USE_EC, #one_read
	.previous

1:	TSB_LOAD_QUAD(%o0, %g2)		/* %g2/%g3 == TSB entry */
	andcc		%g2, %g7, %g0	/* LOCK or INVALID set? */
	bne,pn		%xcc, 80f	/* Skip it */
	sllx		%g2, 22, %o4	/* TAG --> VADDR */

	/* This can definitely be computed faster... */
	srlx		%o0, 4, %o5	/* Build index */
	and		%o5, 511, %o5	/* Mask index */
	sllx		%o5, %g1, %o5	/* Put into vaddr position */
	or		%o4, %o5, %o4	/* Full VADDR. */
	srlx		%o4, %g1, %o4	/* Shift down to create index */
	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
	add		%o4, 0x8, %o4	/* Advance to TTE */
	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

80:	add		%o0, 16, %o0
	cmp		%o0, %o1
	bne,pt		%xcc, 90b
	nop

	retl
	nop
	.size		copy_tsb, .-copy_tsb

	/* Set the invalid bit in all TSB entries. */
	.align		32
	.globl		tsb_init
	.type		tsb_init,#function
tsb_init:	/* %o0 = TSB vaddr, %o1 = size in bytes */
	prefetch	[%o0 + 0x000], #n_writes
	mov		1, %g1
	prefetch	[%o0 + 0x040], #n_writes
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
	prefetch	[%o0 + 0x080], #n_writes
1:	prefetch	[%o0 + 0x0c0], #n_writes
	stx		%g1, [%o0 + 0x00]
	stx		%g1, [%o0 + 0x10]
	stx		%g1, [%o0 + 0x20]
	stx		%g1, [%o0 + 0x30]
	prefetch	[%o0 + 0x100], #n_writes
	stx		%g1, [%o0 + 0x40]
	stx		%g1, [%o0 + 0x50]
	stx		%g1, [%o0 + 0x60]
	stx		%g1, [%o0 + 0x70]
	prefetch	[%o0 + 0x140], #n_writes
	stx		%g1, [%o0 + 0x80]
	stx		%g1, [%o0 + 0x90]
	stx		%g1, [%o0 + 0xa0]
	stx		%g1, [%o0 + 0xb0]
	prefetch	[%o0 + 0x180], #n_writes
	stx		%g1, [%o0 + 0xc0]
	stx		%g1, [%o0 + 0xd0]
	stx		%g1, [%o0 + 0xe0]
	stx		%g1, [%o0 + 0xf0]
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	add		%o0, 0x100, %o0
	retl
	nop
	nop
	nop
	.size		tsb_init, .-tsb_init
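
	/* Niagara-optimized variant of tsb_init.  The block-init ASI
	 * selected below is, presumably, a store form that avoids the
	 * read-for-ownership of each cache line before it is
	 * overwritten, which is why the loop is followed by a
	 * membar #Sync before %asi is restored.
	 */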
	.globl		NGtsb_init
	.type		NGtsb_init,#function
NGtsb_init:
	rd		%asi, %g2
	mov		1, %g1
	wr		%g0, ASI_BLK_INIT_QUAD_LDD_P, %asi
	sllx		%g1, TSB_TAG_INVALID_BIT, %g1
1:	stxa		%g1, [%o0 + 0x00] %asi
	stxa		%g1, [%o0 + 0x10] %asi
	stxa		%g1, [%o0 + 0x20] %asi
	stxa		%g1, [%o0 + 0x30] %asi
	stxa		%g1, [%o0 + 0x40] %asi
	stxa		%g1, [%o0 + 0x50] %asi
	stxa		%g1, [%o0 + 0x60] %asi
	stxa		%g1, [%o0 + 0x70] %asi
	stxa		%g1, [%o0 + 0x80] %asi
	stxa		%g1, [%o0 + 0x90] %asi
	stxa		%g1, [%o0 + 0xa0] %asi
	stxa		%g1, [%o0 + 0xb0] %asi
	stxa		%g1, [%o0 + 0xc0] %asi
	stxa		%g1, [%o0 + 0xd0] %asi
	stxa		%g1, [%o0 + 0xe0] %asi
	stxa		%g1, [%o0 + 0xf0] %asi
	subcc		%o1, 0x100, %o1
	bne,pt		%xcc, 1b
	add		%o0, 0x100, %o0
	membar		#Sync
	retl
	wr		%g2, 0x0, %asi
	.size		NGtsb_init, .-NGtsb_init