  1. /*
  2. * arch/s390/lib/spinlock.c
  3. * Out of line spinlock code.
  4. *
  5. * Copyright (C) IBM Corp. 2004, 2006
  6. * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
  7. */
  8. #include <linux/types.h>
  9. #include <linux/module.h>
  10. #include <linux/spinlock.h>
  11. #include <linux/init.h>
  12. #include <linux/smp.h>
  13. #include <asm/io.h>
/* Number of polling iterations the slow paths below make before yielding. */
int spin_retry = 1000;

/**
 * spin_retry= parameter
 *
 * Kernel command line override for the default retry count, e.g.
 * "spin_retry=2000".  Parsed with base auto-detection (0), so decimal,
 * octal and hex forms are all accepted.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
/*
 * arch_spin_lock_wait - out of line slow path of the spinlock acquire.
 * @lp: the lock to take
 *
 * The lock word owner_cpu is 0 when the lock is free; on acquisition it
 * is set to the one's complement of the cpu number, which is never 0
 * even for cpu 0, so "0" unambiguously means unlocked.
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();	/* value stored when we win */
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		/*
		 * Busy-poll only while the lock is free or the owning
		 * (virtual) cpu appears to be scheduled -- otherwise
		 * spinning cannot make progress.
		 */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				/* looks free: race to swap 0 -> ~cpu */
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			/*
			 * NOTE(review): on LPAR we skip the directed yield
			 * below and just poll again -- presumably because
			 * there is no hypervisor scheduling to defer to;
			 * confirm against the platform docs.
			 */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* hand the cpu to the current holder, then retry once */
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
/*
 * arch_spin_lock_wait_flags - irq-aware out of line spinlock slow path.
 * @lp:    the lock to take
 * @flags: the caller's saved interrupt state
 *
 * Same protocol as arch_spin_lock_wait() (owner_cpu == 0 means free,
 * otherwise it holds ~cpu of the owner), but the wait loop runs with
 * the caller's interrupt state restored and interrupts are disabled
 * only for the actual acquisition attempt, so irqs are not kept off
 * for the whole wait.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();
	unsigned int owner;

	local_irq_restore(flags);	/* spin with irqs in caller's state */
	while (1) {
		owner = lp->owner_cpu;
		/* poll only if free or the owner vcpu seems to be running */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				/* irqs must be off while we hold the lock */
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;	/* taken: irqs stay disabled */
				local_irq_restore(flags);
			}
			/* see NOTE in arch_spin_lock_wait re: LPAR */
			if (MACHINE_IS_LPAR)
				continue;
		}
		owner = lp->owner_cpu;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;	/* taken: irqs stay disabled */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
  81. int arch_spin_trylock_retry(arch_spinlock_t *lp)
  82. {
  83. unsigned int cpu = ~smp_processor_id();
  84. int count;
  85. for (count = spin_retry; count > 0; count--) {
  86. if (arch_spin_is_locked(lp))
  87. continue;
  88. if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
  89. return 1;
  90. }
  91. return 0;
  92. }
  93. EXPORT_SYMBOL(arch_spin_trylock_retry);
  94. void arch_spin_relax(arch_spinlock_t *lock)
  95. {
  96. unsigned int cpu = lock->owner_cpu;
  97. if (cpu != 0) {
  98. if (MACHINE_IS_VM || MACHINE_IS_KVM ||
  99. !smp_vcpu_scheduled(~cpu))
  100. smp_yield_cpu(~cpu);
  101. }
  102. }
  103. EXPORT_SYMBOL(arch_spin_relax);
  104. void _raw_read_lock_wait(arch_rwlock_t *rw)
  105. {
  106. unsigned int old;
  107. int count = spin_retry;
  108. while (1) {
  109. if (count-- <= 0) {
  110. smp_yield();
  111. count = spin_retry;
  112. }
  113. if (!arch_read_can_lock(rw))
  114. continue;
  115. old = rw->lock & 0x7fffffffU;
  116. if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
  117. return;
  118. }
  119. }
  120. EXPORT_SYMBOL(_raw_read_lock_wait);
  121. void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
  122. {
  123. unsigned int old;
  124. int count = spin_retry;
  125. local_irq_restore(flags);
  126. while (1) {
  127. if (count-- <= 0) {
  128. smp_yield();
  129. count = spin_retry;
  130. }
  131. if (!arch_read_can_lock(rw))
  132. continue;
  133. old = rw->lock & 0x7fffffffU;
  134. local_irq_disable();
  135. if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
  136. return;
  137. }
  138. }
  139. EXPORT_SYMBOL(_raw_read_lock_wait_flags);
  140. int _raw_read_trylock_retry(arch_rwlock_t *rw)
  141. {
  142. unsigned int old;
  143. int count = spin_retry;
  144. while (count-- > 0) {
  145. if (!arch_read_can_lock(rw))
  146. continue;
  147. old = rw->lock & 0x7fffffffU;
  148. if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
  149. return 1;
  150. }
  151. return 0;
  152. }
  153. EXPORT_SYMBOL(_raw_read_trylock_retry);
  154. void _raw_write_lock_wait(arch_rwlock_t *rw)
  155. {
  156. int count = spin_retry;
  157. while (1) {
  158. if (count-- <= 0) {
  159. smp_yield();
  160. count = spin_retry;
  161. }
  162. if (!arch_write_can_lock(rw))
  163. continue;
  164. if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
  165. return;
  166. }
  167. }
  168. EXPORT_SYMBOL(_raw_write_lock_wait);
  169. void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
  170. {
  171. int count = spin_retry;
  172. local_irq_restore(flags);
  173. while (1) {
  174. if (count-- <= 0) {
  175. smp_yield();
  176. count = spin_retry;
  177. }
  178. if (!arch_write_can_lock(rw))
  179. continue;
  180. local_irq_disable();
  181. if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
  182. return;
  183. }
  184. }
  185. EXPORT_SYMBOL(_raw_write_lock_wait_flags);
  186. int _raw_write_trylock_retry(arch_rwlock_t *rw)
  187. {
  188. int count = spin_retry;
  189. while (count-- > 0) {
  190. if (!arch_write_can_lock(rw))
  191. continue;
  192. if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
  193. return 1;
  194. }
  195. return 0;
  196. }
  197. EXPORT_SYMBOL(_raw_write_trylock_retry);