/*
 * include/asm-generic/mutex-xchg.h
 *
 * Generic implementation of the mutex fastpath, based on xchg().
 *
 * NOTE: An xchg based implementation might be less optimal than an atomic
 *       decrement/increment based implementation. If your architecture
 *       has a reasonable atomic dec/inc then you should probably use
 *       asm-generic/mutex-dec.h instead, or you could open-code an
 *       optimized version in asm/mutex.h.
 */
#ifndef _ASM_GENERIC_MUTEX_XCHG_H
#define _ASM_GENERIC_MUTEX_XCHG_H
  14. /**
  15. * __mutex_fastpath_lock - try to take the lock by moving the count
  16. * from 1 to a 0 value
  17. * @count: pointer of type atomic_t
  18. * @fail_fn: function to call if the original value was not 1
  19. *
  20. * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
  21. * wasn't 1 originally. This function MUST leave the value lower than 1
  22. * even when the "1" assertion wasn't true.
  23. */
  24. static inline void
  25. __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  26. {
  27. if (unlikely(atomic_xchg(count, 0) != 1))
  28. /*
  29. * We failed to acquire the lock, so mark it contended
  30. * to ensure that any waiting tasks are woken up by the
  31. * unlock slow path.
  32. */
  33. if (likely(atomic_xchg(count, -1) != 1))
  34. fail_fn(count);
  35. }
  36. /**
  37. * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  38. * from 1 to a 0 value
  39. * @count: pointer of type atomic_t
  40. * @fail_fn: function to call if the original value was not 1
  41. *
  42. * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
  43. * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
  44. * or anything the slow path function returns
  45. */
  46. static inline int
  47. __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
  48. {
  49. if (unlikely(atomic_xchg(count, 0) != 1))
  50. if (likely(atomic_xchg(count, -1) != 1))
  51. return fail_fn(count);
  52. return 0;
  53. }
  54. /**
  55. * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
  56. * @count: pointer of type atomic_t
  57. * @fail_fn: function to call if the original value was not 0
  58. *
  59. * try to promote the mutex from 0 to 1. if it wasn't 0, call <function>
  60. * In the failure case, this function is allowed to either set the value to
  61. * 1, or to set it to a value lower than one.
  62. * If the implementation sets it to a value of lower than one, the
  63. * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
  64. * to return 0 otherwise.
  65. */
  66. static inline void
  67. __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
  68. {
  69. if (unlikely(atomic_xchg(count, 1) != 0))
  70. fail_fn(count);
  71. }
/*
 * The unlock fastpath above already leaves the count at 1 (its xchg sets
 * the new value unconditionally before the slow path is entered), so the
 * unlock slow path never needs to release the lock itself.
 */
#define __mutex_slowpath_needs_to_unlock() 0
  73. /**
  74. * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
  75. *
  76. * @count: pointer of type atomic_t
  77. * @fail_fn: spinlock based trylock implementation
  78. *
  79. * Change the count from 1 to a value lower than 1, and return 0 (failure)
  80. * if it wasn't 1 originally, or return 1 (success) otherwise. This function
  81. * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
  82. * Additionally, if the value was < 0 originally, this function must not leave
  83. * it to 0 on failure.
  84. *
  85. * If the architecture has no effective trylock variant, it should call the
  86. * <fail_fn> spinlock-based trylock variant unconditionally.
  87. */
  88. static inline int
  89. __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
  90. {
  91. int prev = atomic_xchg(count, 0);
  92. if (unlikely(prev < 0)) {
  93. /*
  94. * The lock was marked contended so we must restore that
  95. * state. If while doing so we get back a prev value of 1
  96. * then we just own it.
  97. *
  98. * [ In the rare case of the mutex going to 1, to 0, to -1
  99. * and then back to 0 in this few-instructions window,
  100. * this has the potential to trigger the slowpath for the
  101. * owner's unlock path needlessly, but that's not a problem
  102. * in practice. ]
  103. */
  104. prev = atomic_xchg(count, prev);
  105. if (prev < 0)
  106. prev = 0;
  107. }
  108. return prev;
  109. }
#endif /* _ASM_GENERIC_MUTEX_XCHG_H */