/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */
#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

/*
 * The semaphore count is a single 32-bit word split into two fields:
 * the low 16 bits (RWSEM_ACTIVE_MASK) count active lockers, while the
 * high bits go negative (RWSEM_WAITING_BIAS) whenever tasks are queued
 * waiting.  A reader adds RWSEM_ACTIVE_BIAS; a writer adds both the
 * active bias and the waiting bias so that the count is exactly
 * RWSEM_ACTIVE_WRITE_BIAS only when the writer owns an otherwise
 * uncontended lock.
 */
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
  23. /*
  24. * lock for reading
  25. */
  26. static inline void __down_read(struct rw_semaphore *sem)
  27. {
  28. if (atomic_add_return(1,(atomic_t *)(&sem->count)) > 0)
  29. smp_wmb();
  30. else
  31. rwsem_down_read_failed(sem);
  32. }
  33. static inline int __down_read_trylock(struct rw_semaphore *sem)
  34. {
  35. int tmp;
  36. while ((tmp = sem->count) >= 0) {
  37. if (tmp == cmpxchg(&sem->count, tmp,
  38. tmp + RWSEM_ACTIVE_READ_BIAS)) {
  39. smp_wmb();
  40. return 1;
  41. }
  42. }
  43. return 0;
  44. }
  45. /*
  46. * lock for writing
  47. */
  48. static inline void __down_write(struct rw_semaphore *sem)
  49. {
  50. int tmp;
  51. tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
  52. (atomic_t *)(&sem->count));
  53. if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
  54. smp_wmb();
  55. else
  56. rwsem_down_write_failed(sem);
  57. }
  58. static inline int __down_write_trylock(struct rw_semaphore *sem)
  59. {
  60. int tmp;
  61. tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
  62. RWSEM_ACTIVE_WRITE_BIAS);
  63. smp_wmb();
  64. return tmp == RWSEM_UNLOCKED_VALUE;
  65. }
  66. /*
  67. * unlock after reading
  68. */
/*
 * Unlock after reading: drop one active count.  If the result shows
 * no remaining active lockers but queued waiters (high half negative,
 * i.e. tmp < -1 with the active mask clear), wake them up.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	/* release barrier: make the critical section visible before the drop */
	smp_wmb();
	tmp = atomic_sub_return(1,(atomic_t *)(&sem->count));
	/* tmp < -1 excludes the "one waiter just took the lock" transient */
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}
  77. /*
  78. * unlock after writing
  79. */
  80. static inline void __up_write(struct rw_semaphore *sem)
  81. {
  82. smp_wmb();
  83. if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
  84. (atomic_t *)(&sem->count)) < 0)
  85. rwsem_wake(sem);
  86. }
  87. /*
  88. * implement atomic add functionality
  89. */
  90. static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
  91. {
  92. atomic_add(delta, (atomic_t *)(&sem->count));
  93. }
  94. /*
  95. * downgrade write lock to read lock
  96. */
/*
 * Downgrade a held write lock to a read lock: cancel the waiting bias
 * (turning the write bias into a single read bias).  A still-negative
 * result means other tasks are queued and readers among them can now
 * be woken.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	/* release barrier: writes in the write-side critical section
	 * must be visible before readers may enter */
	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}
  105. /*
  106. * implement exchange and add functionality
  107. */
  108. static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
  109. {
  110. smp_mb();
  111. return atomic_add_return(delta, (atomic_t *)(&sem->count));
  112. }
  113. #endif /* _XTENSA_RWSEM_H */