/*
 * arch/s390/lib/spinlock.c
 * Out of line spinlock code.
 *
 * Copyright (C) IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
  8. #include <linux/types.h>
  9. #include <linux/module.h>
  10. #include <linux/spinlock.h>
  11. #include <linux/init.h>
  12. #include <asm/io.h>
  13. int spin_retry = 1000;
  14. /**
  15. * spin_retry= parameter
  16. */
  17. static int __init spin_retry_setup(char *str)
  18. {
  19. spin_retry = simple_strtoul(str, &str, 0);
  20. return 1;
  21. }
  22. __setup("spin_retry=", spin_retry_setup);
  23. static inline void _raw_yield(void)
  24. {
  25. if (MACHINE_HAS_DIAG44)
  26. asm volatile("diag 0,0,0x44");
  27. }
  28. static inline void _raw_yield_cpu(int cpu)
  29. {
  30. if (MACHINE_HAS_DIAG9C)
  31. asm volatile("diag %0,0,0x9c"
  32. : : "d" (cpu_logical_map(cpu)));
  33. else
  34. _raw_yield();
  35. }
/*
 * Out-of-line slow path for arch_spin_lock(): spin until the lock is
 * acquired.
 *
 * lp->owner_cpu stores the one's complement of the owning cpu number,
 * so 0 means "unlocked" and ~owner recovers the cpu id.
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();	/* value we CAS in on success */
	unsigned int owner;

	while (1) {
		owner = lp->owner_cpu;
		/* Busy-wait only if the lock is free or the owner's virtual
		 * cpu appears to be running -- presumably what
		 * smp_vcpu_scheduled() reports; otherwise spinning is
		 * wasted effort. */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;
			}
			/* On LPAR keep spinning instead of yielding --
			 * NOTE(review): presumably because there is no
			 * hypervisor to yield to. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Directed yield towards the lock holder, then one more
		 * acquisition attempt before re-evaluating the owner. */
		owner = lp->owner_cpu;
		if (owner)
			_raw_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
/*
 * Out-of-line slow path for arch_spin_lock_irqsave().  Same protocol
 * as arch_spin_lock_wait(), but the caller's saved interrupt state in
 * @flags is restored while busy-waiting and interrupts are disabled
 * again around each acquisition attempt, so the function returns with
 * interrupts off.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();	/* complemented owner id */
	unsigned int owner;

	local_irq_restore(flags);	/* allow interrupts while spinning */
	while (1) {
		owner = lp->owner_cpu;
		/* Busy-wait only while the lock is free or the owner's
		 * virtual cpu is scheduled. */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			for (count = spin_retry; count > 0; count--) {
				if (arch_spin_is_locked(lp))
					continue;
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
							  cpu) == 0)
					return;	/* locked; irqs stay off */
				local_irq_restore(flags);
			}
			/* On LPAR keep spinning instead of yielding. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Directed yield towards the holder, then one more try. */
		owner = lp->owner_cpu;
		if (owner)
			_raw_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;	/* locked; irqs stay off */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
  93. int arch_spin_trylock_retry(arch_spinlock_t *lp)
  94. {
  95. unsigned int cpu = ~smp_processor_id();
  96. int count;
  97. for (count = spin_retry; count > 0; count--) {
  98. if (arch_spin_is_locked(lp))
  99. continue;
  100. if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
  101. return 1;
  102. }
  103. return 0;
  104. }
  105. EXPORT_SYMBOL(arch_spin_trylock_retry);
  106. void arch_spin_relax(arch_spinlock_t *lock)
  107. {
  108. unsigned int cpu = lock->owner_cpu;
  109. if (cpu != 0) {
  110. if (MACHINE_IS_VM || MACHINE_IS_KVM ||
  111. !smp_vcpu_scheduled(~cpu))
  112. _raw_yield_cpu(~cpu);
  113. }
  114. }
  115. EXPORT_SYMBOL(arch_spin_relax);
/*
 * Slow path for read_lock.  The rwlock word uses bit 31 for the
 * writer and the low 31 bits as the reader count; a reader acquires
 * by atomically incrementing the count while bit 31 is clear.
 */
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			/* Spun long enough: yield the cpu and restart
			 * the retry budget. */
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_read_can_lock(rw))
			continue;
		/* Expected value has the writer bit masked off; CAS in
		 * one more reader. */
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
  133. void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
  134. {
  135. unsigned int old;
  136. int count = spin_retry;
  137. local_irq_restore(flags);
  138. while (1) {
  139. if (count-- <= 0) {
  140. _raw_yield();
  141. count = spin_retry;
  142. }
  143. if (!arch_read_can_lock(rw))
  144. continue;
  145. old = rw->lock & 0x7fffffffU;
  146. local_irq_disable();
  147. if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
  148. return;
  149. }
  150. }
  151. EXPORT_SYMBOL(_raw_read_lock_wait_flags);
  152. int _raw_read_trylock_retry(arch_rwlock_t *rw)
  153. {
  154. unsigned int old;
  155. int count = spin_retry;
  156. while (count-- > 0) {
  157. if (!arch_read_can_lock(rw))
  158. continue;
  159. old = rw->lock & 0x7fffffffU;
  160. if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
  161. return 1;
  162. }
  163. return 0;
  164. }
  165. EXPORT_SYMBOL(_raw_read_trylock_retry);
/*
 * Slow path for write_lock: acquire by atomically changing the lock
 * word from 0 (no readers, no writer) to 0x80000000 (writer bit set).
 */
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			/* Spun long enough: yield the cpu and restart
			 * the retry budget. */
			_raw_yield();
			count = spin_retry;
		}
		if (!arch_write_can_lock(rw))
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
  181. void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
  182. {
  183. int count = spin_retry;
  184. local_irq_restore(flags);
  185. while (1) {
  186. if (count-- <= 0) {
  187. _raw_yield();
  188. count = spin_retry;
  189. }
  190. if (!arch_write_can_lock(rw))
  191. continue;
  192. local_irq_disable();
  193. if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
  194. return;
  195. }
  196. }
  197. EXPORT_SYMBOL(_raw_write_lock_wait_flags);
  198. int _raw_write_trylock_retry(arch_rwlock_t *rw)
  199. {
  200. int count = spin_retry;
  201. while (count-- > 0) {
  202. if (!arch_write_can_lock(rw))
  203. continue;
  204. if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
  205. return 1;
  206. }
  207. return 0;
  208. }
  209. EXPORT_SYMBOL(_raw_write_trylock_retry);