/*
 *  include/asm-s390/spinlock.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

extern int spin_retry;
/*
 * Atomically compare *lock with 'old' and, if they are equal, store
 * 'new' into *lock.  The COMPARE AND SWAP (cs) instruction leaves the
 * value that was found in *lock in %0, so the swap succeeded iff the
 * return value equals 'old'.
 */
static inline int
_raw_compare_and_swap(volatile unsigned int *lock,
                      unsigned int old, unsigned int new)
{
        asm volatile(
                "       cs      %0,%3,%1"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory" );
        return old;
}
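
/*
 * Illustrative sketch, not part of the original header: under the
 * assumption that a full-barrier compare-and-swap is what is wanted,
 * the helper above behaves much like the GCC __sync builtin below.
 * The function name is hypothetical and only meant to make the
 * return-value convention explicit.
 */
static inline unsigned int
_example_compare_and_swap(volatile unsigned int *lock,
                          unsigned int old, unsigned int new)
{
        /* returns the value found in *lock; success iff it equals 'old' */
        return __sync_val_compare_and_swap(lock, old, new);
}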
/*
 * Simple spin lock operations.  There are two variants: one clears
 * IRQs on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 *
 * (The type definitions are in asm/spinlock_types.h; a usage sketch
 *  follows arch_spin_unlock() below.)
 */
#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
#define arch_spin_unlock_wait(lock) \
        do { while (arch_spin_is_locked(lock)) \
                 arch_spin_relax(lock); } while (0)

extern void arch_spin_lock_wait(arch_spinlock_t *);
extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
extern int arch_spin_trylock_retry(arch_spinlock_t *);
extern void arch_spin_relax(arch_spinlock_t *lock);
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        int old;

        /*
         * The owner is stored as ~cpu so that the value is nonzero
         * even for cpu 0; owner_cpu == 0 always means "unlocked".
         */
        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0))
                return;
        arch_spin_lock_wait(lp);
}
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        int old;

        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0))
                return;
        arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        int old;

        old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
        if (likely(old == 0))
                return 1;
        return arch_spin_trylock_retry(lp);
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
}
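
/*
 * Usage sketch, not part of the original header: the expected pairing
 * of the arch-level primitives.  Kernel code normally reaches these
 * through the generic spin_lock()/spin_unlock() wrappers; the function
 * and lock names here are hypothetical.
 */
static inline void _example_spin_usage(arch_spinlock_t *example_lock)
{
        arch_spin_lock(example_lock);
        /* ... critical section ... */
        arch_spin_unlock(example_lock);

        if (arch_spin_trylock(example_lock)) {
                /* the lock was free and is now held; release it again */
                arch_spin_unlock(example_lock);
        }
}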
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to take an
 * irq-safe write-lock, but readers can take non-irq-safe
 * read-locks.  (A sketch of this pattern follows
 * arch_write_trylock() below.)
 */
/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)
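
/*
 * Sketch, not part of the original header: both checks look only at
 * the lock word -- the sign bit is the writer bit, the low 31 bits
 * count the readers.  The function name is hypothetical.
 */
static inline int _example_rw_can_lock(arch_rwlock_t *example_rw)
{
        /* writable only when completely unlocked ... */
        if (arch_write_can_lock(example_rw))
                return 1;
        /* ... readable as long as no writer holds it */
        return arch_read_can_lock(example_rw);
}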
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (_raw_compare_and_swap(&rw->lock, old, old + 1) != old)
                _raw_read_lock_wait_flags(rw, flags);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old, cmp;

        /* retry until the compare-and-swap decrements an unchanged lock word */
        old = rw->lock;
        do {
                cmp = old;
                old = _raw_compare_and_swap(&rw->lock, old, old - 1);
        } while (cmp != old);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0))
                _raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int old;

        old = rw->lock & 0x7fffffffU;
        if (likely(_raw_compare_and_swap(&rw->lock, old, old + 1) == old))
                return 1;
        return _raw_read_trylock_retry(rw);
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0))
                return 1;
        return _raw_write_trylock_retry(rw);
}
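
/*
 * Sketch of the reader/writer mixing described above, not part of the
 * original header: readers may take the plain read lock even from
 * interrupt context, provided every writer holds the lock with
 * interrupts disabled (the generic write_lock_irqsave() wrapper
 * arranges that; here the disabled state is only assumed, and all
 * names are hypothetical).
 */
static inline unsigned int _example_rw_reader(arch_rwlock_t *example_rw,
                                              volatile unsigned int *shared)
{
        unsigned int val;

        arch_read_lock(example_rw);             /* safe from irq context */
        val = *shared;
        arch_read_unlock(example_rw);
        return val;
}

static inline void _example_rw_writer(arch_rwlock_t *example_rw,
                                      volatile unsigned int *shared,
                                      unsigned int val, unsigned long flags)
{
        /* caller is assumed to have saved flags and disabled interrupts */
        arch_write_lock_flags(example_rw, flags);
        *shared = val;
        arch_write_unlock(example_rw);
}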
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */