atomic-llsc.h

#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
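/*
 * Each helper below is a load-linked/store-conditional retry loop:
 * movli.l loads the counter and establishes a reservation, movco.l
 * stores the updated value only if the reservation still holds and
 * reports the outcome in the T bit, and "bf 1b" retries the whole
 * sequence if the store was rejected.
 */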
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub	\n"
"	sub	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
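/*
 * "z" is the GCC constraint for r0 on SH, which is the register the
 * movli.l/movco.l encodings operate on; the trailing "synco" in the
 * two routines below provides the memory barrier expected of the
 * value-returning atomics.
 */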
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_sub_return	\n"
"	sub	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
"	synco						\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_clear_mask	\n"
"	and	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (~mask), "r" (&v->counter)
	: "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_set_mask	\n"
"	or	%1, %0					\n"
"	movco.l	%0, @%2					\n"
"	bf	1b					\n"
	: "=&z" (tmp)
	: "r" (mask), "r" (&v->counter)
	: "t");
}

#endif /* __ASM_SH_ATOMIC_LLSC_H */
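
For readers who do not know SH assembly, each loop above behaves like a compare-and-swap retry loop. The sketch below is illustrative only and is not part of this header: it reproduces the effect of atomic_add_return() with GCC's __atomic builtins on a plain int counter, and the helper name llsc_style_add_return is made up for the example.

/* Illustrative sketch only, assuming GCC's __atomic builtins; the
 * name llsc_style_add_return is hypothetical. A failed CAS plays the
 * role of movco.l failing and "bf 1b" looping back to movli.l. */
static inline int llsc_style_add_return(int i, int *counter)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);
	int new;

	do {
		new = old + i;
		/* On failure, __atomic_compare_exchange_n refreshes
		 * "old" with the current value and the loop retries. */
	} while (!__atomic_compare_exchange_n(counter, &old, new,
					      0 /* strong */,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));

	return new;
}

Calling llsc_style_add_return(1, &x) has the same effect as atomic_add_return(1, &v) does on v->counter, with __ATOMIC_SEQ_CST standing in for the barrier the synco supplies in the SH-4A version.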