helpers.S

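        /* Flush every user register window still held in the CPU
         * (%otherwin) out to the user's stack.  Each 'save' issued
         * while %otherwin is non-zero forces a spill trap that writes
         * one user window; %g2 counts the saves so the matching
         * 'restore' loop returns to the window we started in.
         */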
        .align  32
        .globl  __flushw_user
        .type   __flushw_user,#function
__flushw_user:
        rdpr    %otherwin, %g1
        brz,pn  %g1, 2f
         clr    %g2
1:      save    %sp, -128, %sp
        rdpr    %otherwin, %g1
        brnz,pt %g1, 1b
         add    %g2, 1, %g2
1:      sub     %g2, 1, %g2
        brnz,pt %g2, 1b
         restore %g0, %g0, %g0
2:      retl
         nop
        .size   __flushw_user,.-__flushw_user
EXPORT_SYMBOL(__flushw_user)
        /* Flush %fp and %i7 to the stack for all register
         * windows active inside of the cpu.  This allows
         * show_stack_trace() to avoid using an expensive
         * 'flushw'.  (A C sketch of such a walker follows
         * this function.)
         */
        .globl          stack_trace_flush
        .type           stack_trace_flush,#function
stack_trace_flush:
        rdpr            %pstate, %o0
        wrpr            %o0, PSTATE_IE, %pstate  ! interrupts off

        rdpr            %cwp, %g1                ! %g1: window to return to
        rdpr            %canrestore, %g2         ! %g2: windows left to flush
        sub             %g1, 1, %g3              ! %g3: window being visited

1:      brz,pn          %g2, 2f
         sub            %g2, 1, %g2
        wrpr            %g3, %cwp                ! rotate into the older window
        stx             %fp, [%sp + STACK_BIAS + RW_V9_I6]
        stx             %i7, [%sp + STACK_BIAS + RW_V9_I7]
        ba,pt           %xcc, 1b
         sub            %g3, 1, %g3

2:      wrpr            %g1, %cwp                ! back to the original window
        wrpr            %o0, %pstate             ! restore saved %pstate

        retl
         nop
        .size           stack_trace_flush,.-stack_trace_flush
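        /* With %fp and %i7 written out above, a stack walker can follow
         * the call chain purely through memory.  A minimal C-side sketch,
         * assuming the walk starts from a saved kernel stack pointer 'ksp'
         * and uses struct sparc_stackf (whose 'fp' and 'callers_pc' fields
         * sit at the RW_V9_I6/RW_V9_I7 offsets stored above); bounds
         * checking and loop limits are omitted:
         *
         *      struct sparc_stackf *sf;
         *
         *      sf = (struct sparc_stackf *)(ksp + STACK_BIAS);
         *      while (sf) {
         *              unsigned long pc = sf->callers_pc;   // saved %i7
         *              // record pc, then step to the caller via saved %fp
         *              sf = (struct sparc_stackf *)
         *                      ((unsigned long)sf->fp + STACK_BIAS);
         *      }
         */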
        /* Return the physical id of the executing cpu, as read by the
         * platform's __GET_CPUID macro.  Under CONFIG_SMP the
         * hard_smp_processor_id entry point is an alias for the same code.
         */
#ifdef CONFIG_SMP
        .globl          hard_smp_processor_id
        .type           hard_smp_processor_id,#function
hard_smp_processor_id:
#endif
        .globl          real_hard_smp_processor_id
        .type           real_hard_smp_processor_id,#function
real_hard_smp_processor_id:
        __GET_CPUID(%o0)
        retl
         nop
#ifdef CONFIG_SMP
        .size           hard_smp_processor_id,.-hard_smp_processor_id
#endif
        .size           real_hard_smp_processor_id,.-real_hard_smp_processor_id
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id)