mutex_32.h

/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_32_H
#define _ASM_X86_MUTEX_32_H

#include <asm/alternative.h>

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 * from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn) \
do { \
        unsigned int dummy; \
\
        typecheck(atomic_t *, count); \
        typecheck_fn(void (*)(atomic_t *), fail_fn); \
\
        asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
                     " jns 1f\n" \
                     " call " #fail_fn "\n" \
                     "1:\n" \
                     : "=a" (dummy) \
                     : "a" (count) \
                     : "memory", "ecx", "edx"); \
} while (0)
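
/*
 * Illustrative sketch: a generic lock operation is expected to drive this
 * fastpath roughly as follows, assuming struct mutex keeps its state in an
 * atomic_t 'count' field and a slowpath helper __mutex_lock_slowpath()
 * exists. The 1 -> 0 transition is the uncontended unlocked -> locked case;
 * anything else drops into the slowpath:
 *
 *      void mutex_lock(struct mutex *lock)
 *      {
 *              might_sleep();
 *              __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *      }
 */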

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 * from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count,
                                               int (*fail_fn)(atomic_t *))
{
        if (unlikely(atomic_dec_return(count) < 0))
                return fail_fn(count);
        else
                return 0;
}
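
/*
 * Illustrative sketch: an interruptible lock operation might use the retval
 * variant roughly like this, assuming a slowpath helper
 * __mutex_lock_interruptible_slowpath() that returns 0 on success or a
 * negative errno such as -EINTR:
 *
 *      int mutex_lock_interruptible(struct mutex *lock)
 *      {
 *              might_sleep();
 *              return __mutex_fastpath_lock_retval(&lock->count,
 *                              __mutex_lock_interruptible_slowpath);
 *      }
 */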

/**
 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn) \
do { \
        unsigned int dummy; \
\
        typecheck(atomic_t *, count); \
        typecheck_fn(void (*)(atomic_t *), fail_fn); \
\
        asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
                     " jg 1f\n" \
                     " call " #fail_fn "\n" \
                     "1:\n" \
                     : "=a" (dummy) \
                     : "a" (count) \
                     : "memory", "ecx", "edx"); \
} while (0)
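
/*
 * Illustrative sketch: the unlock side mirrors the lock side. The 0 -> 1
 * transition is the uncontended locked -> unlocked case; if waiters exist
 * (count < 0), the slowpath is entered to wake one of them. This assumes a
 * slowpath helper __mutex_unlock_slowpath() exists:
 *
 *      void mutex_unlock(struct mutex *lock)
 *      {
 *              __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *      }
 */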

#define __mutex_slowpath_needs_to_unlock() 1
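
/*
 * Illustrative sketch: because the unlock fastpath above may leave the count
 * below 1 when it fails, this macro returns 1 so that a generic unlock
 * slowpath knows it still has to mark the mutex unlocked before waking
 * waiters, roughly:
 *
 *      if (__mutex_slowpath_needs_to_unlock())
 *              atomic_set(&lock->count, 1);
 */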

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not
 * leave it at 0 on failure.
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
                                           int (*fail_fn)(atomic_t *))
{
        /*
         * We have two variants here. The cmpxchg based one is the best one
         * because it never induces a false contention state. It is included
         * here because architectures using the inc/dec algorithms over the
         * xchg ones are much more likely to support cmpxchg natively.
         *
         * If not, we fall back to the spinlock based variant - that is
         * just as efficient (and simpler) as a 'destructive' probing of
         * the mutex state would be.
         */
#ifdef __HAVE_ARCH_CMPXCHG
        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
        return 0;
#else
        return fail_fn(count);
#endif
}
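
/*
 * Illustrative sketch: a trylock wrapper might use this fastpath roughly as
 * follows, assuming a fallback helper __mutex_trylock_slowpath() that also
 * returns 1 on success and 0 on failure:
 *
 *      int mutex_trylock(struct mutex *lock)
 *      {
 *              return __mutex_fastpath_trylock(&lock->count,
 *                              __mutex_trylock_slowpath);
 *      }
 */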

#endif /* _ASM_X86_MUTEX_32_H */