spinlock.c

/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

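/*
 * Issue the compare-and-delay (CAD) instruction: pause this CPU for a
 * short, machine-defined interval as long as *lock still contains "old".
 */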
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

static inline int cpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return 0;
	if (smp_vcpu_scheduled(cpu))
		return 0;
	return 1;
}

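/*
 * Slow path of arch_spin_lock: spin on the lock word and yield to the
 * owning CPU if it has been preempted by the hypervisor.
 */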
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

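/*
 * Same as arch_spin_lock_wait, but interrupts are re-enabled (using the
 * state saved in "flags") while waiting and disabled again before the
 * lock is actually taken.
 */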
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

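/*
 * Retry acquiring the spinlock up to spin_retry times without yielding.
 * Returns 1 if the lock was obtained, 0 otherwise.
 */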
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

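/*
 * Slow path of the reader side of the rwlock: wait until the writer bit
 * is clear, then increment the reader count.
 */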
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

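/*
 * Retry taking the reader lock up to spin_retry times.
 * Returns 1 on success, 0 otherwise.
 */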
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

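/*
 * Slow path of the writer side of the rwlock: set the writer bit and wait
 * until all readers have dropped the lock. The z196 variant sets the bit
 * with the __RAW_LOCK interlocked update, the fallback variant uses
 * compare-and-swap.
 */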
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

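/*
 * Retry taking the writer lock up to spin_retry times; it can only be
 * taken while the lock word is completely free (no readers, no writer).
 * Returns 1 on success, 0 otherwise.
 */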
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

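/*
 * Yield to the CPU that owns a lock, identified by its lock value "cpu",
 * unless the owner is unknown or known to be running (LPAR case).
 */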
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);