/* processor.h (3.2 KB) */
  1. // this file was taken from libogc, see http://www.devkitpro.org/
  2. #ifndef __PROCESSOR_H__
  3. #define __PROCESSOR_H__
  4. #include <gctypes.h>
  5. #define __stringify(rn) #rn
  6. #define ATTRIBUTE_ALIGN(v) __attribute__((aligned(v)))
  7. #define _sync() asm volatile("sync")
  8. #define _nop() asm volatile("nop")
  9. #define ppcsync() asm volatile("sc")
  10. #define ppchalt() ({ \
  11. asm volatile("sync"); \
  12. while(1) { \
  13. asm volatile("nop"); \
  14. asm volatile("li 3,0"); \
  15. asm volatile("nop"); \
  16. } \
  17. })
  18. #define mfdcr(_rn) ({register u32 _rval; \
  19. asm volatile("mfdcr %0," __stringify(_rn) \
  20. : "=r" (_rval)); _rval;})
  21. #define mtdcr(rn, val) asm volatile("mtdcr " __stringify(rn) ",%0" : : "r" (val))
  22. #define mfmsr() ({register u32 _rval; \
  23. asm volatile("mfmsr %0" : "=r" (_rval)); _rval;})
  24. #define mtmsr(val) asm volatile("mtmsr %0" : : "r" (val))
  25. #define mfdec() ({register u32 _rval; \
  26. asm volatile("mfdec %0" : "=r" (_rval)); _rval;})
  27. #define mtdec(_val) asm volatile("mtdec %0" : : "r" (_val))
  28. #define mfspr(_rn) \
  29. ({ register u32 _rval = 0; \
  30. asm volatile("mfspr %0," __stringify(_rn) \
  31. : "=r" (_rval));\
  32. _rval; \
  33. })
  34. #define mtspr(_rn, _val) asm volatile("mtspr " __stringify(_rn) ",%0" : : "r" (_val))
  35. #define mfwpar() mfspr(WPAR)
  36. #define mtwpar(_val) mtspr(WPAR,_val)
  37. #define mfmmcr0() mfspr(MMCR0)
  38. #define mtmmcr0(_val) mtspr(MMCR0,_val)
  39. #define mfmmcr1() mfspr(MMCR1)
  40. #define mtmmcr1(_val) mtspr(MMCR1,_val)
  41. #define mfpmc1() mfspr(PMC1)
  42. #define mtpmc1(_val) mtspr(PMC1,_val)
  43. #define mfpmc2() mfspr(PMC2)
  44. #define mtpmc2(_val) mtspr(PMC2,_val)
  45. #define mfpmc3() mfspr(PMC3)
  46. #define mtpmc3(_val) mtspr(PMC3,_val)
  47. #define mfpmc4() mfspr(PMC4)
  48. #define mtpmc4(_val) mtspr(PMC4,_val)
  49. #define mfhid0() mfspr(HID0)
  50. #define mthid0(_val) mtspr(HID0,_val)
  51. #define mfhid1() mfspr(HID1)
  52. #define mthid1(_val) mtspr(HID1,_val)
  53. #define mfhid2() mfspr(HID2)
  54. #define mthid2(_val) mtspr(HID2,_val)
  55. #define mfhid4() mfspr(HID4)
  56. #define mthid4(_val) mtspr(HID4,_val)
  57. #define cntlzw(_val) ({register u32 _rval; \
  58. asm volatile("cntlzw %0, %1" : "=r"((_rval)) : "r"((_val))); _rval;})
  59. #define _CPU_MSR_GET( _msr_value ) \
  60. do { \
  61. _msr_value = 0; \
  62. asm volatile ("mfmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); \
  63. } while (0)
  64. #define _CPU_MSR_SET( _msr_value ) \
  65. { asm volatile ("mtmsr %0" : "=&r" ((_msr_value)) : "0" ((_msr_value))); }
  66. #define _CPU_ISR_Enable() \
  67. { register u32 _val = 0; \
  68. asm volatile ("mfmsr %0; ori %0,%0,0x8000; mtmsr %0" : \
  69. "=&r" (_val) : "0" (_val));\
  70. }
  71. #define _CPU_ISR_Disable( _isr_cookie ) \
  72. { register u32 _disable_mask = MSR_EE; \
  73. _isr_cookie = 0; \
  74. asm volatile ( \
  75. "mfmsr %0; andc %1,%0,%1; mtmsr %1" : \
  76. "=&r" ((_isr_cookie)), "=&r" ((_disable_mask)) : \
  77. "0" ((_isr_cookie)), "1" ((_disable_mask)) \
  78. ); \
  79. }
  80. #define _CPU_ISR_Restore( _isr_cookie ) \
  81. { \
  82. asm volatile ( "mtmsr %0" : \
  83. "=r" ((_isr_cookie)) : \
  84. "0" ((_isr_cookie))); \
  85. }
  86. #define _CPU_ISR_Flash( _isr_cookie ) \
  87. { register u32 _disable_mask = MSR_EE; \
  88. asm volatile ( \
  89. "mtmsr %0; andc %1,%0,%1; mtmsr %1" : \
  90. "=r" ((_isr_cookie)), "=r" ((_disable_mask)) : \
  91. "0" ((_isr_cookie)), "1" ((_disable_mask)) \
  92. ); \
  93. }
  94. #endif