rwsem.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_RWSEM_H
#define _ASM_GENERIC_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#ifdef __KERNEL__

/*
 * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
 * Adapted largely from include/asm-i386/rwsem.h
 * by Paul Mackerras <paulus@samba.org>.
 */

/*
 * the semaphore definition
 */
#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
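
/*
 * How the count is used by the fast paths below (the out-of-line
 * slow paths live in lib/rwsem.c):
 *
 *  - RWSEM_UNLOCKED_VALUE (0): nobody holds the lock.
 *  - Each active reader adds RWSEM_ACTIVE_READ_BIAS (+1), so a small
 *    positive count means the lock is held by that many readers.
 *  - A writer adds RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + 1),
 *    which keeps the count negative while the write lock is held.
 *  - RWSEM_WAITING_BIAS is applied by the slow paths while tasks are
 *    queued, so a negative count whose RWSEM_ACTIVE_MASK part is zero
 *    means "no active lockers, but waiters exist".
 */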

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
		rwsem_down_read_failed(sem);
}
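
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */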
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
				tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
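	/*
	 * Any result other than RWSEM_ACTIVE_WRITE_BIAS means the count
	 * was not zero beforehand, i.e. somebody else is active or
	 * queued, so fall back to the slow path.
	 */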
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}
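
/*
 * lock for writing, but allow a fatal signal to interrupt the wait --
 * returns 0 if the lock was acquired, -EINTR if interrupted
 */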
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
					     &sem->count);
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
			return -EINTR;
	return 0;
}
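
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */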
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
					  RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic_long_dec_return_release(&sem->count);
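	/*
	 * A negative result with no RWSEM_ACTIVE_MASK bits set means no
	 * active lockers remain but tasks are queued on the semaphore,
	 * so the last one out must wake them.
	 */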
	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
						    &sem->count) < 0))
		rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	/*
	 * When downgrading from exclusive to shared ownership,
	 * anything inside the write-locked region cannot leak
	 * into the read side. In contrast, anything in the
	 * read-locked region is ok to be re-ordered into the
	 * write side. As such, rely on RELEASE semantics.
	 */
	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
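	/*
	 * Adding -RWSEM_WAITING_BIAS converts the writer's bias
	 * (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) into a single reader
	 * bias. A result that is still negative means other tasks are
	 * queued, so let rwsem_downgrade_wake() wake any readers that
	 * can now share the lock.
	 */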
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

#endif	/* __KERNEL__ */
#endif	/* _ASM_GENERIC_RWSEM_H */